RISC-V: Support assembler modifier %got_pcrel_hi.
[deliverable/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2020 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35 #include "cpu-arm.h"
36
37 #ifdef OBJ_ELF
38 #include "elf/arm.h"
39 #include "dw2gencfi.h"
40 #endif
41
42 #include "dwarf2dbg.h"
43
44 #ifdef OBJ_ELF
45 /* Must be at least the size of the largest unwind opcode (currently two). */
46 #define ARM_OPCODE_CHUNK_SIZE 8
47
48 /* This structure holds the unwinding state. */
49
/* This structure holds the unwinding state.  */

static struct
{
  /* Symbol marking the start of the function being described.  */
  symbolS * proc_start;
  /* Symbol for this function's unwind table entry, if one has been
     created.  NOTE(review): presumably emitted for the exception index
     table — confirm against the unwind directive handlers.  */
  symbolS * table_entry;
  /* Personality routine symbol, when one is named explicitly.  */
  symbolS * personality_routine;
  /* Index of a predefined personality routine, when used instead of an
     explicit symbol.  */
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* Bytes used in OPCODES, and bytes currently allocated for it.  */
  int opcode_count;
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;

/* Whether --fdpic was given.  */
static int arm_fdpic;
81
82 #endif /* OBJ_ELF */
83
/* Results from operand parsing worker functions.  */

typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  /* Failed, and (per the name) the caller must not backtrack and try an
     alternative parse of the same text.  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;

/* Floating-point ABI variants (cf. mfloat_abi_opt below).  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
99
100 /* Types of processor to assemble for. */
101 #ifndef CPU_DEFAULT
102 /* The code that was here used to select a default CPU depending on compiler
103 pre-defines which were only present when doing native builds, thus
104 changing gas' default behaviour depending upon the build host.
105
106 If you have a target that requires a default CPU option then the you
107 should define CPU_DEFAULT here. */
108 #endif
109
110 /* Perform range checks on positive and negative overflows by checking if the
111 VALUE given fits within the range of an BITS sized immediate. */
112 static bfd_boolean out_of_range_p (offsetT value, offsetT bits)
113 {
114 gas_assert (bits < (offsetT)(sizeof (value) * 8));
115 return (value & ~((1 << bits)-1))
116 && ((value & ~((1 << bits)-1)) != ~((1 << bits)-1));
117 }
118
119 #ifndef FPU_DEFAULT
120 # ifdef TE_LINUX
121 # define FPU_DEFAULT FPU_ARCH_FPA
122 # elif defined (TE_NetBSD)
123 # ifdef OBJ_ELF
124 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
125 # else
126 /* Legacy a.out format. */
127 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
128 # endif
129 # elif defined (TE_VXWORKS)
130 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
131 # else
132 /* For backwards compatibility, default to FPA. */
133 # define FPU_DEFAULT FPU_ARCH_FPA
134 # endif
135 #endif /* ifndef FPU_DEFAULT */
136
137 #define streq(a, b) (strcmp (a, b) == 0)
138
139 /* Current set of feature bits available (CPU+FPU). Different from
140 selected_cpu + selected_fpu in case of autodetection since the CPU
141 feature bits are then all set. */
142 static arm_feature_set cpu_variant;
143 /* Feature bits used in each execution state. Used to set build attribute
144 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
145 static arm_feature_set arm_arch_used;
146 static arm_feature_set thumb_arch_used;
147
148 /* Flags stored in private area of BFD structure. */
149 static int uses_apcs_26 = FALSE;
150 static int atpcs = FALSE;
151 static int support_interwork = FALSE;
152 static int uses_apcs_float = FALSE;
153 static int pic_code = FALSE;
154 static int fix_v4bx = FALSE;
155 /* Warn on using deprecated features. */
156 static int warn_on_deprecated = TRUE;
157 static int warn_on_restrict_it = FALSE;
158
159 /* Understand CodeComposer Studio assembly syntax. */
160 bfd_boolean codecomposer_syntax = FALSE;
161
162 /* Variables that we set while parsing command-line options. Once all
163 options have been read we re-process these values to set the real
164 assembly flags. */
165
166 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
167 instead of -mcpu=arm1). */
168 static const arm_feature_set *legacy_cpu = NULL;
169 static const arm_feature_set *legacy_fpu = NULL;
170
171 /* CPU, extension and FPU feature bits selected by -mcpu. */
172 static const arm_feature_set *mcpu_cpu_opt = NULL;
173 static arm_feature_set *mcpu_ext_opt = NULL;
174 static const arm_feature_set *mcpu_fpu_opt = NULL;
175
176 /* CPU, extension and FPU feature bits selected by -march. */
177 static const arm_feature_set *march_cpu_opt = NULL;
178 static arm_feature_set *march_ext_opt = NULL;
179 static const arm_feature_set *march_fpu_opt = NULL;
180
181 /* Feature bits selected by -mfpu. */
182 static const arm_feature_set *mfpu_opt = NULL;
183
184 /* Constants for known architecture features. */
185 static const arm_feature_set fpu_default = FPU_DEFAULT;
186 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
187 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
188 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
189 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
190 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
191 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
192 #ifdef OBJ_ELF
193 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
194 #endif
195 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
196
197 #ifdef CPU_DEFAULT
198 static const arm_feature_set cpu_default = CPU_DEFAULT;
199 #endif
200
201 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
202 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
203 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
204 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
205 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
206 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
207 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
208 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
209 static const arm_feature_set arm_ext_v4t_5 =
210 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
211 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
212 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
213 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
214 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
215 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
216 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
217 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
218 /* Only for compatability of hint instructions. */
219 static const arm_feature_set arm_ext_v6k_v6t2 =
220 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V6T2);
221 static const arm_feature_set arm_ext_v6_notm =
222 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
223 static const arm_feature_set arm_ext_v6_dsp =
224 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
225 static const arm_feature_set arm_ext_barrier =
226 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
227 static const arm_feature_set arm_ext_msr =
228 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
229 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
230 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
231 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
232 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
233 #ifdef OBJ_ELF
234 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
235 #endif
236 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
237 static const arm_feature_set arm_ext_m =
238 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
239 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
240 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
241 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
242 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
243 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
244 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
245 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
246 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
247 static const arm_feature_set arm_ext_v8m_main =
248 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
249 static const arm_feature_set arm_ext_v8_1m_main =
250 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
251 /* Instructions in ARMv8-M only found in M profile architectures. */
252 static const arm_feature_set arm_ext_v8m_m_only =
253 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
254 static const arm_feature_set arm_ext_v6t2_v8m =
255 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
256 /* Instructions shared between ARMv8-A and ARMv8-M. */
257 static const arm_feature_set arm_ext_atomics =
258 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
259 #ifdef OBJ_ELF
260 /* DSP instructions Tag_DSP_extension refers to. */
261 static const arm_feature_set arm_ext_dsp =
262 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
263 #endif
264 static const arm_feature_set arm_ext_ras =
265 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
266 /* FP16 instructions. */
267 static const arm_feature_set arm_ext_fp16 =
268 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
269 static const arm_feature_set arm_ext_fp16_fml =
270 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
271 static const arm_feature_set arm_ext_v8_2 =
272 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
273 static const arm_feature_set arm_ext_v8_3 =
274 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
275 static const arm_feature_set arm_ext_sb =
276 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
277 static const arm_feature_set arm_ext_predres =
278 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);
279 static const arm_feature_set arm_ext_bf16 =
280 ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16);
281 static const arm_feature_set arm_ext_i8mm =
282 ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM);
283 static const arm_feature_set arm_ext_crc =
284 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC);
285 static const arm_feature_set arm_ext_cde =
286 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE);
287 static const arm_feature_set arm_ext_cde0 =
288 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE0);
289 static const arm_feature_set arm_ext_cde1 =
290 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE1);
291 static const arm_feature_set arm_ext_cde2 =
292 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE2);
293 static const arm_feature_set arm_ext_cde3 =
294 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE3);
295 static const arm_feature_set arm_ext_cde4 =
296 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE4);
297 static const arm_feature_set arm_ext_cde5 =
298 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE5);
299 static const arm_feature_set arm_ext_cde6 =
300 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE6);
301 static const arm_feature_set arm_ext_cde7 =
302 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE7);
303
304 static const arm_feature_set arm_arch_any = ARM_ANY;
305 static const arm_feature_set fpu_any = FPU_ANY;
306 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
307 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
308 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
309
310 static const arm_feature_set arm_cext_iwmmxt2 =
311 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
312 static const arm_feature_set arm_cext_iwmmxt =
313 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
314 static const arm_feature_set arm_cext_xscale =
315 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
316 static const arm_feature_set arm_cext_maverick =
317 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
318 static const arm_feature_set fpu_fpa_ext_v1 =
319 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
320 static const arm_feature_set fpu_fpa_ext_v2 =
321 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
322 static const arm_feature_set fpu_vfp_ext_v1xd =
323 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
324 static const arm_feature_set fpu_vfp_ext_v1 =
325 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
326 static const arm_feature_set fpu_vfp_ext_v2 =
327 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
328 static const arm_feature_set fpu_vfp_ext_v3xd =
329 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
330 static const arm_feature_set fpu_vfp_ext_v3 =
331 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
332 static const arm_feature_set fpu_vfp_ext_d32 =
333 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
334 static const arm_feature_set fpu_neon_ext_v1 =
335 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
336 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
337 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
338 static const arm_feature_set mve_ext =
339 ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE);
340 static const arm_feature_set mve_fp_ext =
341 ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP);
342 /* Note: This has more than one bit set, which means using it with
343 mark_feature_used (which returns if *any* of the bits are set in the current
344 cpu variant) can give surprising results. */
345 static const arm_feature_set armv8m_fp =
346 ARM_FEATURE_COPROC (FPU_VFP_V5_SP_D16);
347 #ifdef OBJ_ELF
348 static const arm_feature_set fpu_vfp_fp16 =
349 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
350 static const arm_feature_set fpu_neon_ext_fma =
351 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
352 #endif
353 static const arm_feature_set fpu_vfp_ext_fma =
354 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
355 static const arm_feature_set fpu_vfp_ext_armv8 =
356 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
357 static const arm_feature_set fpu_vfp_ext_armv8xd =
358 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
359 static const arm_feature_set fpu_neon_ext_armv8 =
360 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
361 static const arm_feature_set fpu_crypto_ext_armv8 =
362 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
363 static const arm_feature_set fpu_neon_ext_v8_1 =
364 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
365 static const arm_feature_set fpu_neon_ext_dotprod =
366 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
367
368 static int mfloat_abi_opt = -1;
369 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
370 directive. */
371 static arm_feature_set selected_arch = ARM_ARCH_NONE;
372 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
373 directive. */
374 static arm_feature_set selected_ext = ARM_ARCH_NONE;
375 /* Feature bits selected by the last -mcpu/-march or by the combination of the
376 last .cpu/.arch directive .arch_extension directives since that
377 directive. */
378 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
379 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
380 static arm_feature_set selected_fpu = FPU_NONE;
381 /* Feature bits selected by the last .object_arch directive. */
382 static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
383 /* Must be long enough to hold any of the names in arm_cpus. */
384 static const struct arm_ext_table * selected_ctx_ext_table = NULL;
385 static char selected_cpu_name[20];
386
387 extern FLONUM_TYPE generic_floating_point_number;
388
389 /* Return if no cpu was selected on command-line. */
390 static bfd_boolean
391 no_cpu_selected (void)
392 {
393 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
394 }
395
396 #ifdef OBJ_ELF
397 # ifdef EABI_DEFAULT
398 static int meabi_flags = EABI_DEFAULT;
399 # else
400 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
401 # endif
402
403 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
404
405 bfd_boolean
406 arm_is_eabi (void)
407 {
408 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
409 }
410 #endif
411
412 #ifdef OBJ_ELF
413 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
414 symbolS * GOT_symbol;
415 #endif
416
417 /* 0: assemble for ARM,
418 1: assemble for Thumb,
419 2: assemble for Thumb even though target CPU does not support thumb
420 instructions. */
421 static int thumb_mode = 0;
422 /* A value distinct from the possible values for thumb_mode that we
423 can use to record whether thumb_mode has been copied into the
424 tc_frag_data field of a frag. */
425 #define MODE_RECORDED (1 << 4)
426
427 /* Specifies the intrinsic IT insn behavior mode. */
428 enum implicit_it_mode
429 {
430 IMPLICIT_IT_MODE_NEVER = 0x00,
431 IMPLICIT_IT_MODE_ARM = 0x01,
432 IMPLICIT_IT_MODE_THUMB = 0x02,
433 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
434 };
435 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
436
437 /* If unified_syntax is true, we are processing the new unified
438 ARM/Thumb syntax. Important differences from the old ARM mode:
439
440 - Immediate operands do not require a # prefix.
441 - Conditional affixes always appear at the end of the
442 instruction. (For backward compatibility, those instructions
443 that formerly had them in the middle, continue to accept them
444 there.)
445 - The IT instruction may appear, and if it does is validated
446 against subsequent conditional affixes. It does not generate
447 machine code.
448
449 Important differences from the old Thumb mode:
450
451 - Immediate operands do not require a # prefix.
452 - Most of the V6T2 instructions are only available in unified mode.
453 - The .N and .W suffixes are recognized and honored (it is an error
454 if they cannot be honored).
455 - All instructions set the flags if and only if they have an 's' affix.
456 - Conditional affixes may be used. They are validated against
457 preceding IT instructions. Unlike ARM mode, you cannot use a
458 conditional affix except in the scope of an IT instruction. */
459
460 static bfd_boolean unified_syntax = FALSE;
461
462 /* An immediate operand can start with #, and ld*, st*, pld operands
463 can contain [ and ]. We need to tell APP not to elide whitespace
464 before a [, which can appear as the first operand for pld.
465 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
466 const char arm_symbol_chars[] = "#[]{}";
467
468 enum neon_el_type
469 {
470 NT_invtype,
471 NT_untyped,
472 NT_integer,
473 NT_float,
474 NT_poly,
475 NT_signed,
476 NT_bfloat,
477 NT_unsigned
478 };
479
480 struct neon_type_el
481 {
482 enum neon_el_type type;
483 unsigned size;
484 };
485
486 #define NEON_MAX_TYPE_ELS 5
487
488 struct neon_type
489 {
490 struct neon_type_el el[NEON_MAX_TYPE_ELS];
491 unsigned elems;
492 };
493
/* Classification of an instruction's position relative to IT and VPT/VPST
   predication blocks, recorded in arm_it.pred_insn_type below.  */
enum pred_instruction_type
{
  OUTSIDE_PRED_INSN,
  INSIDE_VPT_INSN,
  INSIDE_IT_INSN,
  INSIDE_IT_LAST_INSN,
  IF_INSIDE_IT_LAST_INSN,	/* Either outside or inside;
				   if inside, should be the last one.  */
  NEUTRAL_IT_INSN,		/* This could be either inside or outside,
				   i.e. BKPT and NOP.  */
  IT_INSN,			/* The IT insn has been parsed.  */
  VPT_INSN,			/* The VPT/VPST insn has been parsed.  */
  MVE_OUTSIDE_PRED_INSN,	/* Instruction to indicate a MVE instruction without
				   a predication code.  */
  MVE_UNPREDICABLE_INSN,	/* MVE instruction that is non-predicable.  */
};
510
/* The maximum number of operands we need.  */
#define ARM_IT_MAX_OPERANDS 6
#define ARM_IT_MAX_RELOCS 3

/* Everything known about the instruction currently being assembled.  A
   single file-scope instance (inst, defined below) holds this state.  */
struct arm_it
{
  /* Diagnostic to report for this instruction, or NULL if none.  */
  const char * error;
  /* The binary encoding of the instruction.  */
  unsigned long instruction;
  /* Encoded size in bytes.  */
  int size;
  /* Explicitly requested encoding size, or 0 if unconstrained.
     NOTE(review): presumably set by .n/.w width suffixes — confirm.  */
  int size_req;
  /* Condition code for the instruction.  */
  int cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int uncond_value;
  /* Neon-style type suffix information parsed with the mnemonic.  */
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  int is_neon;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long relax;
  /* Relocations to apply against the instruction's fields.  */
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS exp;
    int pc_rel;
  } relocs[ARM_IT_MAX_RELOCS];

  /* Position of this instruction relative to IT/VPT blocks.  */
  enum pred_instruction_type pred_insn_type;

  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present	: 1;  /* Operand present.  */
    unsigned isreg	: 1;  /* Operand was a register.  */
    unsigned immisreg	: 2;  /* .imm field is a second register.
				 0: imm, 1: gpr, 2: MVE Q-register.  */
    unsigned isscalar	: 2;  /* Operand is a (SIMD) scalar:
				 0) not scalar,
				 1) Neon scalar,
				 2) MVE scalar.  */
    unsigned immisalign	: 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat	: 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm	: 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec	: 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad	: 1;  /* Operand is SIMD quad register.  */
    unsigned issingle	: 1;  /* Operand is VFP single-precision register.  */
    unsigned iszr	: 1;  /* Operand is ZR register.  */
    unsigned hasreloc	: 1;  /* Operand has relocation suffix.  */
    unsigned writeback	: 1;  /* Operand has trailing !  */
    unsigned preind	: 1;  /* Preindexed address.  */
    unsigned postind	: 1;  /* Postindexed address.  */
    unsigned negative	: 1;  /* Index register was negated.  */
    unsigned shifted	: 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
  } operands[ARM_IT_MAX_OPERANDS];
};

static struct arm_it inst;
575
576 #define NUM_FLOAT_VALS 8
577
578 const char * fp_const[] =
579 {
580 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
581 };
582
583 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
584
585 #define FAIL (-1)
586 #define SUCCESS (0)
587
588 #define SUFF_S 1
589 #define SUFF_D 2
590 #define SUFF_E 3
591 #define SUFF_P 4
592
593 #define CP_T_X 0x00008000
594 #define CP_T_Y 0x00400000
595
596 #define CONDS_BIT 0x00100000
597 #define LOAD_BIT 0x00100000
598
599 #define DOUBLE_LOAD_FLAG 0x00000001
600
601 struct asm_cond
602 {
603 const char * template_name;
604 unsigned long value;
605 };
606
607 #define COND_ALWAYS 0xE
608
609 struct asm_psr
610 {
611 const char * template_name;
612 unsigned long field;
613 };
614
615 struct asm_barrier_opt
616 {
617 const char * template_name;
618 unsigned long value;
619 const arm_feature_set arch;
620 };
621
622 /* The bit that distinguishes CPSR and SPSR. */
623 #define SPSR_BIT (1 << 22)
624
625 /* The individual PSR flag bits. */
626 #define PSR_c (1 << 16)
627 #define PSR_x (1 << 17)
628 #define PSR_s (1 << 18)
629 #define PSR_f (1 << 19)
630
631 struct reloc_entry
632 {
633 const char * name;
634 bfd_reloc_code_real_type reloc;
635 };
636
637 enum vfp_reg_pos
638 {
639 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
640 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
641 };
642
643 enum vfp_ldstm_type
644 {
645 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
646 };
647
648 /* Bits for DEFINED field in neon_typed_alias. */
649 #define NTA_HASTYPE 1
650 #define NTA_HASINDEX 2
651
652 struct neon_typed_alias
653 {
654 unsigned char defined;
655 unsigned char index;
656 struct neon_type_el eltype;
657 };
658
/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  Each entry should have an error message
   in reg_expected_msgs below.  */
enum arm_reg_type
{
  REG_TYPE_RN,		/* ARM core register.  */
  REG_TYPE_CP,		/* Co-processor number.  */
  REG_TYPE_CN,		/* Co-processor register.  */
  REG_TYPE_FN,		/* FPA register.  */
  REG_TYPE_VFS,		/* VFP single-precision register.  */
  REG_TYPE_VFD,		/* VFP/Neon double-precision register.  */
  REG_TYPE_NQ,		/* Neon quad-precision register.  */
  REG_TYPE_VFSD,	/* VFP single or double-precision register.  */
  REG_TYPE_NDQ,		/* Neon double or quad-precision register.  */
  REG_TYPE_NSD,		/* Neon single or double-precision register.  */
  REG_TYPE_NSDQ,	/* VFP single, double or Neon quad register.  */
  REG_TYPE_VFC,		/* VFP system register.  */
  REG_TYPE_MVF,		/* Maverick MVF register.  */
  REG_TYPE_MVD,		/* Maverick MVD register.  */
  REG_TYPE_MVFX,	/* Maverick MVFX register.  */
  REG_TYPE_MVDX,	/* Maverick MVDX register.  */
  REG_TYPE_MVAX,	/* Maverick MVAX register.  */
  REG_TYPE_MQ,		/* MVE vector register.  */
  REG_TYPE_DSPSC,	/* Maverick DSPSC register.  */
  REG_TYPE_MMXWR,	/* iWMMXt data register.  */
  REG_TYPE_MMXWC,	/* iWMMXt control register.  */
  REG_TYPE_MMXWCG,	/* iWMMXt scalar register.  */
  REG_TYPE_XSCALE,	/* XScale accumulator register.  */
  REG_TYPE_RNB,		/* NOTE(review): diagnostic deliberately empty below;
			   presumably banked-register names — confirm.  */
  REG_TYPE_ZR		/* ZR register.  */
};
690
/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
struct reg_entry
{
  /* Register name as written in assembly source.  */
  const char *               name;
  /* Hardware register number within its category.  */
  unsigned int               number;
  /* One of enum arm_reg_type.  */
  unsigned char              type;
  /* Nonzero for built-in registers (as opposed to user aliases).  */
  unsigned char              builtin;
  /* Extra Neon typing/index info, or NULL — see comment above.  */
  struct neon_typed_alias *  neon;
};
703
704 /* Diagnostics used when we don't get a register of the expected type. */
705 const char * const reg_expected_msgs[] =
706 {
707 [REG_TYPE_RN] = N_("ARM register expected"),
708 [REG_TYPE_CP] = N_("bad or missing co-processor number"),
709 [REG_TYPE_CN] = N_("co-processor register expected"),
710 [REG_TYPE_FN] = N_("FPA register expected"),
711 [REG_TYPE_VFS] = N_("VFP single precision register expected"),
712 [REG_TYPE_VFD] = N_("VFP/Neon double precision register expected"),
713 [REG_TYPE_NQ] = N_("Neon quad precision register expected"),
714 [REG_TYPE_VFSD] = N_("VFP single or double precision register expected"),
715 [REG_TYPE_NDQ] = N_("Neon double or quad precision register expected"),
716 [REG_TYPE_NSD] = N_("Neon single or double precision register expected"),
717 [REG_TYPE_NSDQ] = N_("VFP single, double or Neon quad precision register"
718 " expected"),
719 [REG_TYPE_VFC] = N_("VFP system register expected"),
720 [REG_TYPE_MVF] = N_("Maverick MVF register expected"),
721 [REG_TYPE_MVD] = N_("Maverick MVD register expected"),
722 [REG_TYPE_MVFX] = N_("Maverick MVFX register expected"),
723 [REG_TYPE_MVDX] = N_("Maverick MVDX register expected"),
724 [REG_TYPE_MVAX] = N_("Maverick MVAX register expected"),
725 [REG_TYPE_DSPSC] = N_("Maverick DSPSC register expected"),
726 [REG_TYPE_MMXWR] = N_("iWMMXt data register expected"),
727 [REG_TYPE_MMXWC] = N_("iWMMXt control register expected"),
728 [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
729 [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
730 [REG_TYPE_MQ] = N_("MVE vector register expected"),
731 [REG_TYPE_RNB] = ""
732 };
733
734 /* Some well known registers that we refer to directly elsewhere. */
735 #define REG_R12 12
736 #define REG_SP 13
737 #define REG_LR 14
738 #define REG_PC 15
739
740 /* ARM instructions take 4bytes in the object file, Thumb instructions
741 take 2: */
742 #define INSN_SIZE 4
743
/* An entry in the assembler's opcode table.  */
struct asm_opcode
{
  /* Basic string to match.  */
  const char * template_name;

  /* Parameters to instruction.  NOTE(review): each element presumably
     encodes an expected-operand kind consumed by the operand parser —
     confirm against the opcode table definitions.  */
  unsigned int operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue;

  /* Thumb-format instruction code.  */
  unsigned int tvalue;

  /* Which architecture variant provides this instruction.
     AVARIANT applies to the ARM encoding, TVARIANT to the Thumb one.  */
  const arm_feature_set * avariant;
  const arm_feature_set * tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);

  /* Indicates whether this instruction may be vector predicated.  */
  unsigned int mayBeVecPred : 1;
};
774
775 /* Defines for various bits that we will want to toggle. */
776 #define INST_IMMEDIATE 0x02000000
777 #define OFFSET_REG 0x02000000
778 #define HWOFFSET_IMM 0x00400000
779 #define SHIFT_BY_REG 0x00000010
780 #define PRE_INDEX 0x01000000
781 #define INDEX_UP 0x00800000
782 #define WRITE_BACK 0x00200000
783 #define LDM_TYPE_2_OR_3 0x00400000
784 #define CPSI_MMOD 0x00020000
785
786 #define LITERAL_MASK 0xf000f000
787 #define OPCODE_MASK 0xfe1fffff
788 #define V4_STR_BIT 0x00000020
789 #define VLDR_VMOV_SAME 0x0040f000
790
791 #define T2_SUBS_PC_LR 0xf3de8f00
792
793 #define DATA_OP_SHIFT 21
794 #define SBIT_SHIFT 20
795
796 #define T2_OPCODE_MASK 0xfe1fffff
797 #define T2_DATA_OP_SHIFT 21
798 #define T2_SBIT_SHIFT 20
799
800 #define A_COND_MASK 0xf0000000
801 #define A_PUSH_POP_OP_MASK 0x0fff0000
802
/* Opcodes for pushing/popping registers to/from the stack.  */
#define A1_OPCODE_PUSH 0x092d0000
#define A2_OPCODE_PUSH 0x052d0004
#define A2_OPCODE_POP 0x049d0004

/* Codes to distinguish the arithmetic instructions.  */
#define OPCODE_AND 0
#define OPCODE_EOR 1
#define OPCODE_SUB 2
#define OPCODE_RSB 3
#define OPCODE_ADD 4
#define OPCODE_ADC 5
#define OPCODE_SBC 6
#define OPCODE_RSC 7
#define OPCODE_TST 8
#define OPCODE_TEQ 9
#define OPCODE_CMP 10
#define OPCODE_CMN 11
#define OPCODE_ORR 12
#define OPCODE_MOV 13
#define OPCODE_BIC 14
#define OPCODE_MVN 15

/* Thumb-2 (T32) data-processing opcode field values.  */
#define T2_OPCODE_AND 0
#define T2_OPCODE_BIC 1
#define T2_OPCODE_ORR 2
#define T2_OPCODE_ORN 3
#define T2_OPCODE_EOR 4
#define T2_OPCODE_ADD 8
#define T2_OPCODE_ADC 10
#define T2_OPCODE_SBC 11
#define T2_OPCODE_SUB 13
#define T2_OPCODE_RSB 14

/* 16-bit Thumb instruction templates; operand fields are OR-ed in
   by the encoders.  */
#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

/* Add/subtract: three-register (_R3), high-register (_HI), SP-adjust
   (_ST), SP/PC-relative and immediate (_I8/_I3) forms.  */
#define T_OPCODE_ADD_R3 0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

/* Shifts/rotates, by register (_R) or by immediate (_I).  */
#define T_OPCODE_ASR_R 0x4100
#define T_OPCODE_LSL_R 0x4080
#define T_OPCODE_LSR_R 0x40c0
#define T_OPCODE_ROR_R 0x41c0
#define T_OPCODE_ASR_I 0x1000
#define T_OPCODE_LSL_I 0x0000
#define T_OPCODE_LSR_I 0x0800

/* Moves and compares; _HR variants accept high registers.  */
#define T_OPCODE_MOV_I8 0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

/* Loads and stores: PC/SP-relative, immediate-offset word/half/byte
   (_IW/_IH/_IB) and register-offset (_RW/_RH/_RB) addressing.  */
#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH 0xb400
#define T_OPCODE_POP 0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE 2 /* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000
894
/* Diagnostic messages used throughout the parsers and encoders below.
   Kept as macros so each string is marked for translation (via _())
   exactly once.  */
#define BAD_SYNTAX _("syntax error")
#define BAD_ARGS _("bad arguments to instruction")
#define BAD_SP _("r13 not allowed here")
#define BAD_PC _("r15 not allowed here")
#define BAD_ODD _("Odd register not allowed here")
#define BAD_EVEN _("Even register not allowed here")
#define BAD_COND _("instruction cannot be conditional")
#define BAD_OVERLAP _("registers may not be the same")
#define BAD_HIREG _("lo register required")
#define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
#define BAD_BRANCH _("branch must be last instruction in IT block")
#define BAD_BRANCH_OFF _("branch out of range or not a multiple of 2")
#define BAD_NO_VPT _("instruction not allowed in VPT block")
#define BAD_NOT_IT _("instruction not allowed in IT block")
#define BAD_NOT_VPT _("instruction missing MVE vector predication code")
#define BAD_FPU _("selected FPU does not support instruction")
#define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
#define BAD_OUT_VPT \
	_("vector predicated instruction should be in VPT/VPST block")
#define BAD_IT_COND _("incorrect condition in IT block")
#define BAD_VPT_COND _("incorrect condition in VPT/VPST block")
#define BAD_IT_IT _("IT falling in the range of a previous IT block")
#define MISSING_FNSTART _("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE _("branch out of range")
#define BAD_FP16 _("selected processor does not support fp16 instruction")
#define BAD_BF16 _("selected processor does not support bf16 instruction")
#define BAD_CDE _("selected processor does not support cde instruction")
#define BAD_CDE_COPROC _("coprocessor for insn is not enabled for cde")
#define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
#define MVE_NOT_IT _("Warning: instruction is UNPREDICTABLE in an IT " \
		     "block")
#define MVE_NOT_VPT _("Warning: instruction is UNPREDICTABLE in a VPT " \
		      "block")
#define MVE_BAD_PC _("Warning: instruction is UNPREDICTABLE with PC" \
		     " operand")
#define MVE_BAD_SP _("Warning: instruction is UNPREDICTABLE with SP" \
		     " operand")
#define BAD_SIMD_TYPE _("bad type in SIMD instruction")
#define BAD_MVE_AUTO \
  _("GAS auto-detection mode and -march=all is deprecated for MVE, please" \
    " use a valid -march or -mcpu option.")
#define BAD_MVE_SRCDEST _("Warning: 32-bit element size and same destination "\
			  "and source operands makes instruction UNPREDICTABLE")
#define BAD_EL_TYPE _("bad element type for instruction")
#define MVE_BAD_QREG _("MVE vector register Q[0..7] expected")
946
/* Name lookup tables, populated at startup (presumably by md_begin --
   the initialisation code is outside this region).  */
static struct hash_control * arm_ops_hsh;	/* Instruction mnemonics.  */
static struct hash_control * arm_cond_hsh;	/* Condition codes.  */
static struct hash_control * arm_vcond_hsh;	/* Vector (VPT) conditions.  */
static struct hash_control * arm_shift_hsh;	/* Shift operator names.  */
static struct hash_control * arm_psr_hsh;	/* PSR field names.  */
static struct hash_control * arm_v7m_psr_hsh;	/* v7-M special registers.  */
static struct hash_control * arm_reg_hsh;	/* Register names.  */
static struct hash_control * arm_reloc_hsh;	/* Relocation specifiers.  */
static struct hash_control * arm_barrier_opt_hsh; /* Barrier options.  */

/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
	      <insn> */

symbolS *  last_label_seen;
static int label_is_thumb_function_name = FALSE;

/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  expressionS literals [MAX_LITERAL_POOL_SIZE]; /* Pending pool entries.  */
  unsigned int next_free_entry;	/* Number of slots in use.  */
  unsigned int id;
  symbolS * symbol;		/* Symbol placed at the pool itself.  */
  segT section;			/* Section/subsection this pool serves.  */
  subsegT sub_section;
#ifdef OBJ_ELF
  /* Source location of each entry, for debug line info.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  struct literal_pool * next;	/* Link in list_of_pools.  */
  unsigned int alignment;	/* Required pool alignment (log2) -- TODO
				   confirm units against the flush code.  */
} literal_pool;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;

/* State for the .asmfunc/.endasmfunc directive pair (judging by the
   state names; the directive handlers are outside this region).  */
typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,
  WAITING_ASMFUNC_NAME,
  WAITING_ENDASMFUNC
} asmfunc_states;

static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;

/* Current IT/VPT predication state.  For ELF it lives in the
   per-segment info, so state does not leak across section switches.  */
#ifdef OBJ_ELF
# define now_pred seg_info (now_seg)->tc_segment_info_data.current_pred
#else
static struct current_pred now_pred;
#endif
1005
1006 static inline int
1007 now_pred_compatible (int cond)
1008 {
1009 return (cond & ~1) == (now_pred.cc & ~1);
1010 }
1011
/* Return non-zero if the instruction currently being assembled carries
   a condition code other than AL, i.e. it is genuinely conditional.  */
static inline int
conditional_insn (void)
{
  return inst.cond != COND_ALWAYS;
}
1017
/* Forward declarations for the IT/VPT predication state machine.  */
static int in_pred_block (void);

static int handle_pred_state (void);

static void force_automatic_it_block_close (void);

static void it_fsm_post_encode (void);

/* Record TYPE as the current instruction's predication role and run
   the predication state machine; on FAIL, return from the calling
   (void) function.  */
#define set_pred_insn_type(type)			\
  do						\
    {						\
      inst.pred_insn_type = type;		\
      if (handle_pred_state () == FAIL)		\
	return;					\
    }						\
  while (0)

/* As set_pred_insn_type, but for use in functions that must return
   FAILRET on failure.  */
#define set_pred_insn_type_nonvoid(type, failret) \
  do						\
    {						\
      inst.pred_insn_type = type;		\
      if (handle_pred_state () == FAIL)		\
	return failret;				\
    }						\
  while(0)

/* Mark the current instruction as the last one in an IT block,
   choosing the variant that matches its condition code.  */
#define set_pred_insn_type_last()		\
  do						\
    {						\
      if (inst.cond == COND_ALWAYS)		\
	set_pred_insn_type (IF_INSIDE_IT_LAST_INSN); \
      else					\
	set_pred_insn_type (INSIDE_IT_LAST_INSN); \
    }						\
  while (0)

/* Toggle value[pos].  */
#define TOGGLE_BIT(value, pos) (value ^ (1 << pos))

/* Pure syntax.  */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or    0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpPHh";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

/* Skips at most a single space -- callers rely on this never consuming
   more than one character.  */
#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)

/* Which 16-bit floating point format is in effect; DEFAULT means no
   explicit choice has been made yet.  */
enum fp_16bit_format
{
  ARM_FP16_FORMAT_IEEE = 0x1,
  ARM_FP16_FORMAT_ALTERNATIVE = 0x2,
  ARM_FP16_FORMAT_DEFAULT = 0x3
};

static enum fp_16bit_format fp16_format = ARM_FP16_FORMAT_DEFAULT;
1100
1101
1102 static inline int
1103 skip_past_char (char ** str, char c)
1104 {
1105 /* PR gas/14987: Allow for whitespace before the expected character. */
1106 skip_whitespace (*str);
1107
1108 if (**str == c)
1109 {
1110 (*str)++;
1111 return SUCCESS;
1112 }
1113 else
1114 return FAIL;
1115 }
1116
#define skip_past_comma(str) skip_past_char (str, ',')

/* Arithmetic expressions (possibly involving symbols).  */

/* Return TRUE if anything in the expression is a bignum.  */

static bfd_boolean
walk_no_bignums (symbolS * sp)
{
  /* Recursively walk SP's value expression; an O_big operator anywhere
     in the tree makes the whole expression unusable as an immediate.  */
  if (symbol_get_value_expression (sp)->X_op == O_big)
    return TRUE;

  if (symbol_get_value_expression (sp)->X_add_symbol)
    {
      /* X_op_symbol is only present for binary operators, so guard it.  */
      return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
	      || (symbol_get_value_expression (sp)->X_op_symbol
		  && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
    }

  return FALSE;
}

/* TRUE while my_get_expression is running; md_operand uses this to
   flag unparseable operands as O_illegal instead of doing nothing.  */
static bfd_boolean in_my_get_expression = FALSE;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0		/* No '#'/'$' prefix permitted.  */
#define GE_IMM_PREFIX 1		/* '#'/'$' prefix required.  */
#define GE_OPT_PREFIX 2		/* '#'/'$' prefix optional.  */
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
1148
/* Parse an arithmetic expression at *STR into EP, enforcing the
   immediate-prefix rules selected by PREFIX_MODE (one of the GE_*
   values above).  On success, return SUCCESS with *STR advanced past
   the expression.  On failure, return non-zero with inst.error set
   and *STR updated to the point of failure.

   NOTE(review): the error paths return a mix of FAIL and the literal 1.
   Both are non-zero, so callers that test for truth are unaffected,
   but the inconsistency looks accidental -- verify before relying on
   the exact value.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
                  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression code reads from input_line_pointer, so
     temporarily point it at our operand string, restoring it on every
     exit path below.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = TRUE;
  expression (ep);
  in_my_get_expression = FALSE;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return SUCCESS;
}
1218
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.

   Note that fp constants aren't represent in the normal way on the ARM.
   In big endian mode, things are as expected.  However, in little endian
   mode fp constants are big-endian word-wise, and little-endian byte-wise
   within the words.  For example, (double) 1.1 in big endian mode is
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   the byte sequence 99 99 f1 3f 9a 99 99 99.

   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */

const char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;			/* Size of the result, in LITTLENUMs.  */
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  switch (type)
    {
    case 'H':
    case 'h':
      /* IEEE half precision: one 16-bit littlenum.  */
      prec = 1;
      break;

    /* If this is a bfloat16, then parse it slightly differently, as it
       does not follow the IEEE specification for floating point numbers
       exactly.  */
    case 'b':
      {
	FLONUM_TYPE generic_float;

	t = atof_ieee_detail (input_line_pointer, 1, 8, words, &generic_float);

	if (t)
	  input_line_pointer = t;
	else
	  return _("invalid floating point number");

	/* Overwrite the conversion result for the special values,
	   which need fixed bfloat16 bit patterns.  */
	switch (generic_float.sign)
	  {
	  /* Is +Inf.  */
	  case 'P':
	    words[0] = 0x7f80;
	    break;

	  /* Is -Inf.  */
	  case 'N':
	    words[0] = 0xff80;
	    break;

	  /* Is NaN.  */
	  /* bfloat16 has two types of NaN - quiet and signalling.
	     Quiet NaN has bit[6] == 1 && faction != 0, whereas
	     signalling NaN's have bit[0] == 0 && fraction != 0.
	     Chosen this specific encoding as it is the same form
	     as used by other IEEE 754 encodings in GAS.  */
	  case 0:
	    words[0] = 0x7fff;
	    break;

	  default:
	    break;
	  }

	*sizeP = 2;

	md_number_to_chars (litP, (valueT) words[0], sizeof (LITTLENUM_TYPE));

	return NULL;
      }
    case 'f':
    case 'F':
    case 's':
    case 'S':
      /* Single precision: two littlenums.  */
      prec = 2;
      break;

    case 'd':
    case 'D':
    case 'r':
    case 'R':
      /* Double precision: four littlenums.  */
      prec = 4;
      break;

    case 'x':
    case 'X':
      prec = 5;
      break;

    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  /* Emit the littlenums in the order required by the target (see the
     function comment above for the mixed-endian layout).  */
  if (target_big_endian || prec == 1)
    for (i = 0; i < prec; i++)
      {
	md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += sizeof (LITTLENUM_TYPE);
      }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
    /* Pure-endian FP: straight little-endian littlenum order.  */
    for (i = prec - 1; i >= 0; i--)
      {
	md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += sizeof (LITTLENUM_TYPE);
      }
  else
    /* For a 4 byte float the order of elements in `words' is 1 0.
       For an 8 byte float the order is 1 0 3 2.  */
    for (i = 0; i < prec; i += 2)
      {
	md_number_to_chars (litP, (valueT) words[i + 1],
			    sizeof (LITTLENUM_TYPE));
	md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
			    (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += 2 * sizeof (LITTLENUM_TYPE);
      }

  return NULL;
}
1354
1355 /* We handle all bad expressions here, so that we can report the faulty
1356 instruction in the error message. */
1357
1358 void
1359 md_operand (expressionS * exp)
1360 {
1361 if (in_my_get_expression)
1362 exp->X_op = O_illegal;
1363 }
1364
1365 /* Immediate values. */
1366
1367 #ifdef OBJ_ELF
1368 /* Generic immediate-value read function for use in directives.
1369 Accepts anything that 'expression' can fold to a constant.
1370 *val receives the number. */
1371
1372 static int
1373 immediate_for_directive (int *val)
1374 {
1375 expressionS exp;
1376 exp.X_op = O_illegal;
1377
1378 if (is_immediate_prefix (*input_line_pointer))
1379 {
1380 input_line_pointer++;
1381 expression (&exp);
1382 }
1383
1384 if (exp.X_op != O_constant)
1385 {
1386 as_bad (_("expected #constant"));
1387 ignore_rest_of_line ();
1388 return FAIL;
1389 }
1390 *val = exp.X_add_number;
1391 return SUCCESS;
1392 }
1393 #endif
1394
/* Register parsing.  */

/* Generic register parser.  CCP points to what should be the
   beginning of a register name.  If it is indeed a valid register
   name, advance CCP over it and return the reg_entry structure;
   otherwise return NULL.  Does not issue diagnostics.  */

static struct reg_entry *
arm_reg_parse_multi (char **ccp)
{
  char *start = *ccp;
  char *p;
  struct reg_entry *reg;

  skip_whitespace (start);

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif
#ifdef OPTIONAL_REGISTER_PREFIX
  if (*start == OPTIONAL_REGISTER_PREFIX)
    start++;
#endif

  /* A register name starts with a letter and continues with letters,
     digits or underscores.  */
  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  /* Look the candidate name up in the register hash table.  */
  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  /* Only consume input on success.  */
  *ccp = p;
  return reg;
}
1437
/* Handle the alternative spellings accepted for a few register
   classes when the name did not parse as a register of type TYPE.
   START is the unconsumed operand text, REG the entry found by
   arm_reg_parse_multi (may be NULL).  Return the register number,
   or FAIL if no alternative syntax applies.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1476
1477 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1478 return value is the register number or FAIL. */
1479
1480 static int
1481 arm_reg_parse (char **ccp, enum arm_reg_type type)
1482 {
1483 char *start = *ccp;
1484 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1485 int ret;
1486
1487 /* Do not allow a scalar (reg+index) to parse as a register. */
1488 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1489 return FAIL;
1490
1491 if (reg && reg->type == type)
1492 return reg->number;
1493
1494 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1495 return ret;
1496
1497 *ccp = start;
1498 return FAIL;
1499 }
1500
1501 /* Parse a Neon type specifier. *STR should point at the leading '.'
1502 character. Does no verification at this stage that the type fits the opcode
1503 properly. E.g.,
1504
1505 .i32.i32.s16
1506 .s32.f32
1507 .u16
1508
1509 Can all be legally parsed by this function.
1510
1511 Fills in neon_type struct pointer with parsed information, and updates STR
1512 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1513 type, FAIL if not. */
1514
1515 static int
1516 parse_neon_type (struct neon_type *type, char **str)
1517 {
1518 char *ptr = *str;
1519
1520 if (type)
1521 type->elems = 0;
1522
1523 while (type->elems < NEON_MAX_TYPE_ELS)
1524 {
1525 enum neon_el_type thistype = NT_untyped;
1526 unsigned thissize = -1u;
1527
1528 if (*ptr != '.')
1529 break;
1530
1531 ptr++;
1532
1533 /* Just a size without an explicit type. */
1534 if (ISDIGIT (*ptr))
1535 goto parsesize;
1536
1537 switch (TOLOWER (*ptr))
1538 {
1539 case 'i': thistype = NT_integer; break;
1540 case 'f': thistype = NT_float; break;
1541 case 'p': thistype = NT_poly; break;
1542 case 's': thistype = NT_signed; break;
1543 case 'u': thistype = NT_unsigned; break;
1544 case 'd':
1545 thistype = NT_float;
1546 thissize = 64;
1547 ptr++;
1548 goto done;
1549 case 'b':
1550 thistype = NT_bfloat;
1551 switch (TOLOWER (*(++ptr)))
1552 {
1553 case 'f':
1554 ptr += 1;
1555 thissize = strtoul (ptr, &ptr, 10);
1556 if (thissize != 16)
1557 {
1558 as_bad (_("bad size %d in type specifier"), thissize);
1559 return FAIL;
1560 }
1561 goto done;
1562 case '0': case '1': case '2': case '3': case '4':
1563 case '5': case '6': case '7': case '8': case '9':
1564 case ' ': case '.':
1565 as_bad (_("unexpected type character `b' -- did you mean `bf'?"));
1566 return FAIL;
1567 default:
1568 break;
1569 }
1570 break;
1571 default:
1572 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1573 return FAIL;
1574 }
1575
1576 ptr++;
1577
1578 /* .f is an abbreviation for .f32. */
1579 if (thistype == NT_float && !ISDIGIT (*ptr))
1580 thissize = 32;
1581 else
1582 {
1583 parsesize:
1584 thissize = strtoul (ptr, &ptr, 10);
1585
1586 if (thissize != 8 && thissize != 16 && thissize != 32
1587 && thissize != 64)
1588 {
1589 as_bad (_("bad size %d in type specifier"), thissize);
1590 return FAIL;
1591 }
1592 }
1593
1594 done:
1595 if (type)
1596 {
1597 type->el[type->elems].type = thistype;
1598 type->el[type->elems].size = thissize;
1599 type->elems++;
1600 }
1601 }
1602
1603 /* Empty/missing type is not a successful parse. */
1604 if (type->elems == 0)
1605 return FAIL;
1606
1607 *str = ptr;
1608
1609 return SUCCESS;
1610 }
1611
1612 /* Errors may be set multiple times during parsing or bit encoding
1613 (particularly in the Neon bits), but usually the earliest error which is set
1614 will be the most meaningful. Avoid overwriting it with later (cascading)
1615 errors by calling this function. */
1616
1617 static void
1618 first_error (const char *err)
1619 {
1620 if (!inst.error)
1621 inst.error = err;
1622 }
1623
1624 /* Parse a single type, e.g. ".s32", leading period included. */
1625 static int
1626 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1627 {
1628 char *str = *ccp;
1629 struct neon_type optype;
1630
1631 if (*str == '.')
1632 {
1633 if (parse_neon_type (&optype, &str) == SUCCESS)
1634 {
1635 if (optype.elems == 1)
1636 *vectype = optype.el[0];
1637 else
1638 {
1639 first_error (_("only one type should be specified for operand"));
1640 return FAIL;
1641 }
1642 }
1643 else
1644 {
1645 first_error (_("vector type expected"));
1646 return FAIL;
1647 }
1648 }
1649 else
1650 return FAIL;
1651
1652 *ccp = str;
1653
1654 return SUCCESS;
1655 }
1656
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES 15		/* E.g. vld1.8 {d0[]}.  */
#define NEON_INTERLEAVE_LANES 14	/* Multi-register interleaved access.  */
1662
1663 /* Record a use of the given feature. */
1664 static void
1665 record_feature_use (const arm_feature_set *feature)
1666 {
1667 if (thumb_mode)
1668 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
1669 else
1670 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
1671 }
1672
/* If the given feature is available in the selected CPU, mark it as used.
   Returns TRUE iff the feature is available; on failure an error may be
   recorded via first_error.  */
static bfd_boolean
mark_feature_used (const arm_feature_set *feature)
{

  /* Do not support the use of MVE only instructions when in auto-detection or
     -march=all.  */
  if (((feature == &mve_ext) || (feature == &mve_fp_ext))
      && ARM_CPU_IS_ANY (cpu_variant))
    {
      first_error (BAD_MVE_AUTO);
      return FALSE;
    }
  /* Ensure the option is valid on the current architecture.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
    return FALSE;

  /* Add the appropriate architecture feature for the barrier option used.
     */
  record_feature_use (feature);

  return TRUE;
}
1697
/* Parse either a register or a scalar, with an optional type. Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with "no type, no index" defaults.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_NSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  /* An MVE Q register request is satisfied by a Neon Q register,
     subject to the MVE register-range restriction.  */
  if (type == REG_TYPE_MQ)
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	return FAIL;

      /* NOTE(review): REG is already known non-NULL here (the NULL case
	 returned above), so the !reg test is redundant but harmless.  */
      if (!reg || reg->type != REG_TYPE_NQ)
	return FAIL;

      if (reg->number > 14 && !mark_feature_used (&fpu_vfp_ext_d32))
	{
	  first_error (_("expected MVE register [q0..q7]"));
	  return FAIL;
	}
      type = REG_TYPE_NQ;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (type == REG_TYPE_NQ))
    return FAIL;


  if (type != reg->type)
    return FAIL;

  /* Pick up any type/index information attached to the register alias.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix overrides nothing: it may not redefine a
     type already attached to the alias.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional "[index]" suffix makes this a scalar.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD
	  && !(type == REG_TYPE_VFS
	       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2))
	  && !(type == REG_TYPE_NQ
	       && ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)))
	{
	  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	    first_error (_("only D and Q registers may be indexed"));
	  else
	    first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* An empty "[]" means all lanes.  */
      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1835
1836 /* Like arm_reg_parse, but also allow the following extra features:
1837 - If RTYPE is non-zero, return the (possibly restricted) type of the
1838 register (e.g. Neon double or quad reg when either has been requested).
1839 - If this is a Neon vector type with additional type information, fill
1840 in the struct pointed to by VECTYPE (if non-NULL).
1841 This function will fault on encountering a scalar. */
1842
1843 static int
1844 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1845 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1846 {
1847 struct neon_typed_alias atype;
1848 char *str = *ccp;
1849 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1850
1851 if (reg == FAIL)
1852 return FAIL;
1853
1854 /* Do not allow regname(... to parse as a register. */
1855 if (*str == '(')
1856 return FAIL;
1857
1858 /* Do not allow a scalar (reg+index) to parse as a register. */
1859 if ((atype.defined & NTA_HASINDEX) != 0)
1860 {
1861 first_error (_("register operand expected, but got scalar"));
1862 return FAIL;
1863 }
1864
1865 if (vectype)
1866 *vectype = atype.eltype;
1867
1868 *ccp = str;
1869
1870 return reg;
1871 }
1872
/* A parsed scalar packs register and lane as reg * 16 + index; these
   macros unpack it.  */
#define NEON_SCALAR_REG(X) ((X) >> 4)
#define NEON_SCALAR_INDEX(X) ((X) & 15)

/* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
   have enough information to be able to do a good job bounds-checking. So, we
   just do easy checks here, and do further checks later.  */

static int
parse_scalar (char **ccp, int elsize, struct neon_type_el *type, enum
	      arm_reg_type reg_type)
{
  int reg;
  char *str = *ccp;
  struct neon_typed_alias atype;
  unsigned reg_size;		/* Width of REG_TYPE in bits.  */

  reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);

  switch (reg_type)
    {
    case REG_TYPE_VFS:
      reg_size = 32;
      break;
    case REG_TYPE_VFD:
      reg_size = 64;
      break;
    case REG_TYPE_MQ:
      reg_size = 128;
      break;
    default:
      gas_assert (0);
      return FAIL;
    }

  /* A scalar must carry an index (NEON_ALL_LANES counts as one for MQ).  */
  if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
    return FAIL;

  if (reg_type != REG_TYPE_MQ && atype.index == NEON_ALL_LANES)
    {
      first_error (_("scalar must have an index"));
      return FAIL;
    }
  /* The lane must fit within the register for ELSIZE-bit elements.  */
  else if (atype.index >= reg_size / elsize)
    {
      first_error (_("scalar index out of range"));
      return FAIL;
    }

  if (type)
    *type = atype.eltype;

  *ccp = str;

  /* Pack as reg * 16 + lane; see NEON_SCALAR_REG/NEON_SCALAR_INDEX.  */
  return reg * 16 + atype.index;
}
1928
/* Types of registers in a list.  */

enum reg_list_els
{
  REGLIST_RN,		/* Core registers (r0-r15).  */
  REGLIST_CLRM,		/* CLRM operand: r0-r12, lr, APSR.  */
  REGLIST_VFP_S,	/* VFP single-precision registers.  */
  REGLIST_VFP_S_VPR,	/* As above, plus trailing VPR.  */
  REGLIST_VFP_D,	/* VFP double-precision registers.  */
  REGLIST_VFP_D_VPR,	/* As above, plus trailing VPR.  */
  REGLIST_NEON_D	/* Neon D registers (Q regs allowed as pairs).  */
};
1941
/* Parse an ARM register list. Returns the bitmask, or FAIL.  */

static long
parse_reg_list (char ** strp, enum reg_list_els etype)
{
  char *str = *strp;
  long range = 0;		/* Accumulated register bitmask.  */
  int another_range;

  gas_assert (etype == REGLIST_RN || etype == REGLIST_CLRM);

  /* We come back here if we get ranges concatenated by '+' or '|'.  */
  do
    {
      skip_whitespace (str);

      another_range = 0;

      if (*str == '{')
	{
	  int in_range = 0;	/* Non-zero after seeing "rN-".  */
	  int cur_reg = -1;	/* Previously parsed register.  */

	  str++;
	  do
	    {
	      int reg;
	      const char apsr_str[] = "apsr";
	      int apsr_str_len = strlen (apsr_str);

	      reg = arm_reg_parse (&str, REG_TYPE_RN);
	      if (etype == REGLIST_CLRM)
		{
		  /* CLRM forbids SP and PC but accepts "apsr", which is
		     encoded in bit 15 (PC's position).  */
		  if (reg == REG_SP || reg == REG_PC)
		    reg = FAIL;
		  else if (reg == FAIL
			   && !strncasecmp (str, apsr_str, apsr_str_len)
			   && !ISALPHA (*(str + apsr_str_len)))
		    {
		      reg = 15;
		      str += apsr_str_len;
		    }

		  if (reg == FAIL)
		    {
		      first_error (_("r0-r12, lr or APSR expected"));
		      return FAIL;
		    }
		}
	      else /* etype == REGLIST_RN.  */
		{
		  /* NOTE(review): this indexes reg_expected_msgs with an
		     enum reg_list_els value rather than an arm_reg_type;
		     presumably works because REGLIST_RN and REG_TYPE_RN
		     are both 0 -- verify, REG_TYPE_RN looks intended.  */
		  if (reg == FAIL)
		    {
		      first_error (_(reg_expected_msgs[REGLIST_RN]));
		      return FAIL;
		    }
		}

	      if (in_range)
		{
		  int i;

		  /* Ranges must ascend, e.g. {r0-r3}, not {r3-r0}.  */
		  if (reg <= cur_reg)
		    {
		      first_error (_("bad range in register list"));
		      return FAIL;
		    }

		  /* Fill in the registers between the range endpoints.  */
		  for (i = cur_reg + 1; i < reg; i++)
		    {
		      if (range & (1 << i))
			as_tsktsk
			  (_("Warning: duplicated register (r%d) in register list"),
			   i);
		      else
			range |= 1 << i;
		    }
		  in_range = 0;
		}

	      if (range & (1 << reg))
		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
			   reg);
	      else if (reg <= cur_reg)
		as_tsktsk (_("Warning: register range not in ascending order"));

	      range |= 1 << reg;
	      cur_reg = reg;
	    }
	  /* A comma continues the list; a '-' (recorded via the comma
	     operator) starts a range whose end comes next iteration.  */
	  while (skip_past_comma (&str) != FAIL
		 || (in_range = 1, *str++ == '-'));
	  str--;

	  if (skip_past_char (&str, '}') == FAIL)
	    {
	      first_error (_("missing `}'"));
	      return FAIL;
	    }
	}
      else if (etype == REGLIST_RN)
	{
	  /* No braces: accept a bare expression as a register mask.  */
	  expressionS exp;

	  if (my_get_expression (&exp, &str, GE_NO_PREFIX))
	    return FAIL;

	  if (exp.X_op == O_constant)
	    {
	      /* Only bits 0-15 may be set in a core register mask.  */
	      if (exp.X_add_number
		  != (exp.X_add_number & 0x0000ffff))
		{
		  inst.error = _("invalid register mask");
		  return FAIL;
		}

	      if ((range & exp.X_add_number) != 0)
		{
		  /* NOTE(review): this computes (1 << lowest_set_bit) - 1,
		     i.e. a mask, not a register index, so the r%d printed
		     below looks wrong -- verify intended output.  */
		  int regno = range & exp.X_add_number;

		  regno &= -regno;
		  regno = (1 << regno) - 1;
		  as_tsktsk
		    (_("Warning: duplicated register (r%d) in register list"),
		     regno);
		}

	      range |= exp.X_add_number;
	    }
	  else
	    {
	      /* Non-constant mask: defer to a relocation, but only one
		 such expression is representable.  */
	      if (inst.relocs[0].type != 0)
		{
		  inst.error = _("expression too complex");
		  return FAIL;
		}

	      memcpy (&inst.relocs[0].exp, &exp, sizeof (expressionS));
	      inst.relocs[0].type = BFD_RELOC_ARM_MULTI;
	      inst.relocs[0].pc_rel = 0;
	    }
	}

      if (*str == '|' || *str == '+')
	{
	  str++;
	  another_range = 1;
	}
    }
  while (another_range);

  *strp = str;
  return range;
}
2095
2096 /* Parse a VFP register list. If the string is invalid return FAIL.
2097 Otherwise return the number of registers, and set PBASE to the first
2098 register. Parses registers of type ETYPE.
2099 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
2100 - Q registers can be used to specify pairs of D registers
2101 - { } can be omitted from around a singleton register list
2102 FIXME: This is not implemented, as it would require backtracking in
2103 some cases, e.g.:
2104 vtbl.8 d3,d4,d5
2105 This could be done (the meaning isn't really ambiguous), but doesn't
2106 fit in well with the current parsing framework.
2107 - 32 D registers may be used (also true for VFPv3).
2108 FIXME: Types are ignored in these register lists, which is probably a
2109 bug. */
2110
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype,
		    bfd_boolean *partial_match)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;
  bfd_boolean vpr_seen = FALSE;
  bfd_boolean expect_vpr =
    (etype == REGLIST_VFP_S_VPR) || (etype == REGLIST_VFP_D_VPR);

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Select the register type, and how many registers are addressable,
     for this flavour of list.  For D-register flavours max_regs is
     filled in below once the D32 feature has been checked.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
    case REGLIST_VFP_S_VPR:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
    case REGLIST_VFP_D_VPR:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;

    default:
      gas_assert (0);
    }

  if (etype != REGLIST_VFP_S && etype != REGLIST_VFP_S_VPR)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that the D32 extension was actually relied upon.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  /* base_reg starts out-of-range so the first register parsed
     always becomes the provisional base.  */
  base_reg = max_regs;
  *partial_match = FALSE;

  do
    {
      int setmask = 1, addregs = 1;
      const char vpr_str[] = "vpr";
      int vpr_str_len = strlen (vpr_str);

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (expect_vpr)
	{
	  /* VPR, when accepted, must be the final element of the list.  */
	  if (new_base == FAIL
	      && !strncasecmp (str, vpr_str, vpr_str_len)
	      && !ISALPHA (*(str + vpr_str_len))
	      && !vpr_seen)
	    {
	      vpr_seen = TRUE;
	      str += vpr_str_len;
	      if (count == 0)
		base_reg = 0; /* Canonicalize VPR only on d0 with 0 regs.  */
	    }
	  else if (vpr_seen)
	    {
	      first_error (_("VPR expected last"));
	      return FAIL;
	    }
	  else if (new_base == FAIL)
	    {
	      if (regtype == REG_TYPE_VFS)
		first_error (_("VFP single precision register or VPR "
			       "expected"));
	      else /* regtype == REG_TYPE_VFD.  */
		first_error (_("VFP/Neon double precision register or VPR "
			       "expected"));
	      return FAIL;
	    }
	}
      else if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      *partial_match = TRUE;
      if (vpr_seen)
	continue;

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      /* Warn (once) about out-of-order lists; they assemble but are
	 probably not what the programmer intended.  */
      if ((mask >> new_base) != 0 && ! warned && !vpr_seen)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Add every register in the range to the mask.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  str++;	/* Skip the closing '}'.  */

  /* Sanity check -- should have raised a parse error above.  */
  if ((!vpr_seen && count == 0) || count > max_regs)
    abort ();

  *pbase = base_reg;

  if (expect_vpr && !vpr_seen)
    {
      first_error (_("VPR expected last"));
      return FAIL;
    }

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
2324
2325 /* True if two alias types are the same. */
2326
2327 static bfd_boolean
2328 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
2329 {
2330 if (!a && !b)
2331 return TRUE;
2332
2333 if (!a || !b)
2334 return FALSE;
2335
2336 if (a->defined != b->defined)
2337 return FALSE;
2338
2339 if ((a->defined & NTA_HASTYPE) != 0
2340 && (a->eltype.type != b->eltype.type
2341 || a->eltype.size != b->eltype.size))
2342 return FALSE;
2343
2344 if ((a->defined & NTA_HASINDEX) != 0
2345 && (a->index != b->index))
2346 return FALSE;
2347
2348 return TRUE;
2349 }
2350
2351 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2352 The base register is put in *PBASE.
2353 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2354 the return value.
2355 The register stride (minus one) is put in bit 4 of the return value.
2356 Bits [6:5] encode the list length (minus one).
2357 The type of the list elements is put in *ELTYPE, if non-NULL. */
2358
2359 #define NEON_LANE(X) ((X) & 0xf)
2360 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
2361 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2362
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   int mve,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;
  int count = 0;
  int lane = -1;
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = mve ? _("register stride must be 1") :
    _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      if (mve)
	rtype = REG_TYPE_MQ;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register of the list: remember it and its type, which
	     every subsequent element must match.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register establishes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Later registers must continue the established stride.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax.  Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register covered by the range (a Q register
	     counts as two D registers).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* A [n] index: all elements must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x].  We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || (!mve && count > 4)
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length into the return value as documented
     in the comment and NEON_* accessor macros above.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2523
2524 /* Parse an explicit relocation suffix on an expression. This is
2525 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2526 arm_reloc_hsh contains no entries, so this function can only
2527 succeed if there is no () after the word. Returns -1 on error,
2528 BFD_RELOC_UNUSED if there wasn't any suffix. */
2529
2530 static int
2531 parse_reloc (char **str)
2532 {
2533 struct reloc_entry *r;
2534 char *p, *q;
2535
2536 if (**str != '(')
2537 return BFD_RELOC_UNUSED;
2538
2539 p = *str + 1;
2540 q = p;
2541
2542 while (*q && *q != ')' && *q != ',')
2543 q++;
2544 if (*q != ')')
2545 return -1;
2546
2547 if ((r = (struct reloc_entry *)
2548 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2549 return -1;
2550
2551 *str = q + 1;
2552 return r->reloc;
2553 }
2554
2555 /* Directives: register aliases. */
2556
2557 static struct reg_entry *
2558 insert_reg_alias (char *str, unsigned number, int type)
2559 {
2560 struct reg_entry *new_reg;
2561 const char *name;
2562
2563 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2564 {
2565 if (new_reg->builtin)
2566 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2567
2568 /* Only warn about a redefinition if it's not defined as the
2569 same register. */
2570 else if (new_reg->number != number || new_reg->type != type)
2571 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2572
2573 return NULL;
2574 }
2575
2576 name = xstrdup (str);
2577 new_reg = XNEW (struct reg_entry);
2578
2579 new_reg->name = name;
2580 new_reg->number = number;
2581 new_reg->type = type;
2582 new_reg->builtin = FALSE;
2583 new_reg->neon = NULL;
2584
2585 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2586 abort ();
2587
2588 return new_reg;
2589 }
2590
2591 static void
2592 insert_neon_reg_alias (char *str, int number, int type,
2593 struct neon_typed_alias *atype)
2594 {
2595 struct reg_entry *reg = insert_reg_alias (str, number, type);
2596
2597 if (!reg)
2598 {
2599 first_error (_("attempt to redefine typed alias"));
2600 return;
2601 }
2602
2603 if (atype)
2604 {
2605 reg->neon = XNEW (struct neon_typed_alias);
2606 *reg->neon = *atype;
2607 }
2608 }
2609
2610 /* Look for the .req directive. This is of the form:
2611
2612 new_register_name .req existing_register_name
2613
2614 If we find one, or if it looks sufficiently like one that we want to
2615 handle any error here, return TRUE. Otherwise return FALSE. */
2616
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the upper-case variant if it differs from the name
	 as originally given.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise the lower-case variant.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
2689
2690 /* Create a Neon typed/indexed register alias using directives, e.g.:
2691 X .dn d5.s32[1]
2692 Y .qn 6.s16
2693 Z .dn d7
2694 T .dn Z[0]
2695 These typed registers can be used instead of the types specified after the
2696 Neon mnemonic, so long as all operands given have types. Types can also be
2697 specified directly, e.g.:
2698 vadd d0.s32, d1.s32, d2.s32 */
2699
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* Determine whether this is a D (.dn) or Q (.qn) register alias.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      basereg = &mybasereg;
      /* Q registers are numbered in units of D registers, hence * 2.  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit any type/index already attached to the base register.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2838
2839 /* Should never be called, as .req goes between the alias and the
2840 register name, not at the beginning of the line. */
2841
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* A valid .req appears between the alias and the register name, so
     a .req at the start of a line is always a syntax error.  */
  as_bad (_("invalid syntax for .req directive"));
}
2847
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  /* Like .req, .dn must follow the alias name; at line start it is
     always a syntax error.  */
  as_bad (_("invalid syntax for .dn directive"));
}
2853
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  /* Like .req, .qn must follow the alias name; at line start it is
     always a syntax error.  */
  as_bad (_("invalid syntax for .qn directive"));
}
2859
2860 /* The .unreq directive deletes an alias which was previously defined
2861 by .req. For example:
2862
2863 my_alias .req r11
2864 .unreq my_alias */
2865
2866 static void
2867 s_unreq (int a ATTRIBUTE_UNUSED)
2868 {
2869 char * name;
2870 char saved_char;
2871
2872 name = input_line_pointer;
2873
2874 while (*input_line_pointer != 0
2875 && *input_line_pointer != ' '
2876 && *input_line_pointer != '\n')
2877 ++input_line_pointer;
2878
2879 saved_char = *input_line_pointer;
2880 *input_line_pointer = 0;
2881
2882 if (!*name)
2883 as_bad (_("invalid syntax for .unreq directive"));
2884 else
2885 {
2886 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2887 name);
2888
2889 if (!reg)
2890 as_bad (_("unknown register alias '%s'"), name);
2891 else if (reg->builtin)
2892 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2893 name);
2894 else
2895 {
2896 char * p;
2897 char * nbuf;
2898
2899 hash_delete (arm_reg_hsh, name, FALSE);
2900 free ((char *) reg->name);
2901 if (reg->neon)
2902 free (reg->neon);
2903 free (reg);
2904
2905 /* Also locate the all upper case and all lower case versions.
2906 Do not complain if we cannot find one or the other as it
2907 was probably deleted above. */
2908
2909 nbuf = strdup (name);
2910 for (p = nbuf; *p; p++)
2911 *p = TOUPPER (*p);
2912 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2913 if (reg)
2914 {
2915 hash_delete (arm_reg_hsh, nbuf, FALSE);
2916 free ((char *) reg->name);
2917 if (reg->neon)
2918 free (reg->neon);
2919 free (reg);
2920 }
2921
2922 for (p = nbuf; *p; p++)
2923 *p = TOLOWER (*p);
2924 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2925 if (reg)
2926 {
2927 hash_delete (arm_reg_hsh, nbuf, FALSE);
2928 free ((char *) reg->name);
2929 if (reg->neon)
2930 free (reg->neon);
2931 free (reg);
2932 }
2933
2934 free (nbuf);
2935 }
2936 }
2937
2938 *input_line_pointer = saved_char;
2939 demand_empty_rest_of_line ();
2940 }
2941
2942 /* Directives: Instruction set selection. */
2943
2944 #ifdef OBJ_ELF
2945 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2946 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2947 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2948 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2949
2950 /* Create a new mapping symbol for the transition to STATE. */
2951
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Choose the canonical mapping-symbol name for this state.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Tag the symbol with the appropriate ARM/Thumb attributes.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
3025
3026 /* We must sometimes convert a region marked as code to data during
3027 code alignment, if an odd number of bytes have to be padded. The
3028 code mapping symbol is pushed to an aligned address. */
3029
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* It was also the first mapping symbol of the frag.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding as data, then resume the code state after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
3052
3053 static void mapping_state_2 (enum mstate state, int max_chars);
3054
3055 /* Set the mapping state to STATE. Only call this when about to
3056 emit some STATE bytes to the file. */
3057
3058 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to me marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  /* Emit the mapping symbol at the current output position.  */
  mapping_state_2 (state, 0);
}
3091
3092 /* Same as mapping_state, but MAX_CHARS bytes have already been
3093 allocated. Put the mapping symbol that far back. */
3094
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only emit mapping symbols into ordinary (loaded) sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      /* If code precedes this first explicit state, the start of the
	 section must retroactively be marked as data.  */
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  /* Place the symbol MAX_CHARS back, i.e. at the start of the bytes
     that have already been allocated for this state.  */
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
3121 #undef TRANSITION
3122 #else
3123 #define mapping_state(x) ((void)0)
3124 #define mapping_state_2(x, y) ((void)0)
3125 #endif
3126
3127 /* Find the real, Thumb encoded start of a Thumb function. */
3128
3129 #ifdef OBJ_COFF
static symbolS *
find_real_start (symbolS * symbolP)
{
  char * real_start;
  const char * name = S_GET_NAME (symbolP);
  symbolS * new_target;

  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
#define STUB_NAME ".real_start_of"

  if (name == NULL)
    abort ();

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location. These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
    return symbolP;

  /* Look for the companion ".real_start_of<name>" symbol.  */
  real_start = concat (STUB_NAME, name, NULL);
  new_target = symbol_find (real_start);
  free (real_start);

  if (new_target == NULL)
    {
      /* Fall back to the symbol itself if no stub label exists.  */
      as_warn (_("Failed to find real start of function: %s\n"), name);
      new_target = symbolP;
    }

  return new_target;
}
3163 #endif
3164
3165 static void
3166 opcode_select (int width)
3167 {
3168 switch (width)
3169 {
3170 case 16:
3171 if (! thumb_mode)
3172 {
3173 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
3174 as_bad (_("selected processor does not support THUMB opcodes"));
3175
3176 thumb_mode = 1;
3177 /* No need to force the alignment, since we will have been
3178 coming from ARM mode, which is word-aligned. */
3179 record_alignment (now_seg, 1);
3180 }
3181 break;
3182
3183 case 32:
3184 if (thumb_mode)
3185 {
3186 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
3187 as_bad (_("selected processor does not support ARM opcodes"));
3188
3189 thumb_mode = 0;
3190
3191 if (!need_pass_2)
3192 frag_align (2, 0, 0);
3193
3194 record_alignment (now_seg, 1);
3195 }
3196 break;
3197
3198 default:
3199 as_bad (_("invalid instruction size selected (%d)"), width);
3200 }
3201 }
3202
/* Handle the .arm directive: switch to 32-bit ARM encoding.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
3209
/* Handle the .thumb directive: switch to 16-bit Thumb encoding.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
3216
3217 static void
3218 s_code (int unused ATTRIBUTE_UNUSED)
3219 {
3220 int temp;
3221
3222 temp = get_absolute_expression ();
3223 switch (temp)
3224 {
3225 case 16:
3226 case 32:
3227 opcode_select (temp);
3228 break;
3229
3230 default:
3231 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
3232 }
3233 }
3234
/* Implement the ".force_thumb" pseudo-op.  */

static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* 2 (rather than 1) marks "forced" Thumb mode.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
3251
/* Implement the ".thumb_func" pseudo-op: switch to Thumb encoding and
   arrange for the next label to be marked as a Thumb function.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
3261
/* Perform a .set directive, but also mark the alias as
   being a thumb function.

   EQUIV is non-zero for the ".thumb_equiv"-style variant, in which
   case redefining an already-defined (non-register) symbol is
   diagnosed.  */

static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Temporarily re-terminate NAME so the diagnostic prints just
	 the symbol, then put the delimiter back.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    } /* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  /* Parse the value expression and assign it to the symbol.  */
  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
3350
3351 /* Directives: Mode selection. */
3352
3353 /* .syntax [unified|divided] - choose the new unified syntax
3354 (same for Arm and Thumb encoding, modulo slight differences in what
3355 can be represented) or the old divergent syntax for each mode. */
3356 static void
3357 s_syntax (int unused ATTRIBUTE_UNUSED)
3358 {
3359 char *name, delim;
3360
3361 delim = get_symbol_name (& name);
3362
3363 if (!strcasecmp (name, "unified"))
3364 unified_syntax = TRUE;
3365 else if (!strcasecmp (name, "divided"))
3366 unified_syntax = FALSE;
3367 else
3368 {
3369 as_bad (_("unrecognized syntax mode \"%s\""), name);
3370 return;
3371 }
3372 (void) restore_line_pointer (delim);
3373 demand_empty_rest_of_line ();
3374 }
3375
3376 /* Directives: sectioning and alignment. */
3377
/* Implement the ".bss" pseudo-op: switch the current (sub)section to
   the BSS section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3390
/* Implement the ".even" pseudo-op: align to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3402
3403 /* Directives: CodeComposer Studio. */
3404
3405 /* .ref (for CodeComposer Studio syntax only). */
3406 static void
3407 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3408 {
3409 if (codecomposer_syntax)
3410 ignore_rest_of_line ();
3411 else
3412 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3413 }
3414
3415 /* If name is not NULL, then it is used for marking the beginning of a
3416 function, whereas if it is NULL then it means the function end. */
3417 static void
3418 asmfunc_debug (const char * name)
3419 {
3420 static const char * last_name = NULL;
3421
3422 if (name != NULL)
3423 {
3424 gas_assert (last_name == NULL);
3425 last_name = name;
3426
3427 if (debug_type == DEBUG_STABS)
3428 stabs_generate_asm_func (name, name);
3429 }
3430 else
3431 {
3432 gas_assert (last_name != NULL);
3433
3434 if (debug_type == DEBUG_STABS)
3435 stabs_generate_asm_endfunc (last_name, last_name);
3436
3437 last_name = NULL;
3438 }
3439 }
3440
3441 static void
3442 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3443 {
3444 if (codecomposer_syntax)
3445 {
3446 switch (asmfunc_state)
3447 {
3448 case OUTSIDE_ASMFUNC:
3449 asmfunc_state = WAITING_ASMFUNC_NAME;
3450 break;
3451
3452 case WAITING_ASMFUNC_NAME:
3453 as_bad (_(".asmfunc repeated."));
3454 break;
3455
3456 case WAITING_ENDASMFUNC:
3457 as_bad (_(".asmfunc without function."));
3458 break;
3459 }
3460 demand_empty_rest_of_line ();
3461 }
3462 else
3463 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3464 }
3465
3466 static void
3467 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3468 {
3469 if (codecomposer_syntax)
3470 {
3471 switch (asmfunc_state)
3472 {
3473 case OUTSIDE_ASMFUNC:
3474 as_bad (_(".endasmfunc without a .asmfunc."));
3475 break;
3476
3477 case WAITING_ASMFUNC_NAME:
3478 as_bad (_(".endasmfunc without function."));
3479 break;
3480
3481 case WAITING_ENDASMFUNC:
3482 asmfunc_state = OUTSIDE_ASMFUNC;
3483 asmfunc_debug (NULL);
3484 break;
3485 }
3486 demand_empty_rest_of_line ();
3487 }
3488 else
3489 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3490 }
3491
3492 static void
3493 s_ccs_def (int name)
3494 {
3495 if (codecomposer_syntax)
3496 s_globl (name);
3497 else
3498 as_bad (_(".def pseudo-op only available with -mccs flag."));
3499 }
3500
3501 /* Directives: Literal pools. */
3502
3503 static literal_pool *
3504 find_literal_pool (void)
3505 {
3506 literal_pool * pool;
3507
3508 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3509 {
3510 if (pool->section == now_seg
3511 && pool->sub_section == now_subseg)
3512 break;
3513 }
3514
3515 return pool;
3516 }
3517
3518 static literal_pool *
3519 find_or_make_literal_pool (void)
3520 {
3521 /* Next literal pool ID number. */
3522 static unsigned int latest_pool_num = 1;
3523 literal_pool * pool;
3524
3525 pool = find_literal_pool ();
3526
3527 if (pool == NULL)
3528 {
3529 /* Create a new pool. */
3530 pool = XNEW (literal_pool);
3531 if (! pool)
3532 return NULL;
3533
3534 pool->next_free_entry = 0;
3535 pool->section = now_seg;
3536 pool->sub_section = now_subseg;
3537 pool->next = list_of_pools;
3538 pool->symbol = NULL;
3539 pool->alignment = 2;
3540
3541 /* Add it to the list. */
3542 list_of_pools = pool;
3543 }
3544
3545 /* New pools, and emptied pools, will have a NULL symbol. */
3546 if (pool->symbol == NULL)
3547 {
3548 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3549 (valueT) 0, &zero_address_frag);
3550 pool->id = latest_pool_num ++;
3551 }
3552
3553 /* Done. */
3554 return pool;
3555 }
3556
/* Add the literal in the global 'inst'
   structure to the relevant literal pool.

   NBYTES is the size of the literal (4 or 8).  On success the
   instruction's first reloc expression is rewritten to reference the
   pool symbol plus the entry's byte offset, and SUCCESS is returned;
   on overflow or a bad operand type inst.error is set and FAIL is
   returned.  Identical values already present in the pool are reused
   rather than duplicated.  */

static int
add_to_lit_pool (unsigned int nbytes)
{
/* X_md encoding: low byte is the entry size, high byte flags a
   padding slot inserted to reach 8-byte alignment.  */
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  if (nbytes == 8)
    {
      /* Split the 64-bit value into two 32-bit halves, swapped for
	 big-endian targets.  */
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	       : inst.relocs[0].exp.X_unsigned ? 0
	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Match an existing 4-byte constant entry...  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.relocs[0].exp.X_unsigned))
	    break;

	  /* ... or an existing symbolic entry.  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.relocs[0].exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.relocs[0].exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.relocs[0].exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.relocs[0].exp.X_unsigned))
	/* An 8-byte value is stored as two consecutive 4-byte entries
	   on an 8-byte-aligned offset; match both halves.  */
	break;

      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	/* A 4-byte literal can simply reuse an alignment padding slot.  */
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.relocs[0].exp.X_op == O_constant
		|| inst.relocs[0].exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Insert a 4-byte zero padding slot to reach 8-byte
		 alignment; it may later be reused by a 4-byte literal.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.relocs[0].exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Emit the two 4-byte halves; the second one's bookkeeping
	     (next_free_entry) is completed after the #ifdef below.  */
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Overwrite the reused padding slot with the real literal.  */
      pool->literals[entry] = inst.relocs[0].exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Point the instruction at the pool symbol + entry offset.  */
  inst.relocs[0].exp.X_op = O_symbol;
  inst.relocs[0].exp.X_add_number = pool_size;
  inst.relocs[0].exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3721
3722 bfd_boolean
3723 tc_start_label_without_colon (void)
3724 {
3725 bfd_boolean ret = TRUE;
3726
3727 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3728 {
3729 const char *label = input_line_pointer;
3730
3731 while (!is_end_of_line[(int) label[-1]])
3732 --label;
3733
3734 if (*label == '.')
3735 {
3736 as_bad (_("Invalid label '%s'"), label);
3737 ret = FALSE;
3738 }
3739
3740 asmfunc_debug (label);
3741
3742 asmfunc_state = WAITING_ENDASMFUNC;
3743 }
3744
3745 return ret;
3746 }
3747
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Gives SYMBOLP a (copied) name, a segment, a value and a frag, then
   links it onto the end of the global symbol chain and runs the
   object-format and target new-symbol hooks.  */

static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.	 */
	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	    valu,	/* Symbol value.  */
	       fragS *	    frag)	/* Associated fragment.	 */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns stable
     storage regardless of what the caller does with NAME.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* It is too late to add symbols once the table is frozen.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
3798
/* Implement the ".ltorg" pseudo-op: dump the current literal pool at
   the present location, aligning first and (on ELF) emitting a data
   mapping symbol.  A no-op if the pool is absent or empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* \002 in the name keeps it from clashing with user symbols.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Give the (previously created) pool symbol its real location.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3850
3851 #ifdef OBJ_ELF
3852 /* Forward declarations for functions below, in the MD interface
3853 section. */
3854 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3855 static valueT create_unwind_entry (int);
3856 static void start_unwind_section (const segT, int);
3857 static void add_unwind_opcode (valueT, int);
3858 static void flush_pending_unwind (void);
3859
3860 /* Directives: Data. */
3861
/* Implement ".word"/".short"-style data directives (NBYTES wide),
   additionally accepting ARM relocation suffixes such as "(got)" on
   symbolic expressions.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No suffix was present; emit the plain expression.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Save the text, then splice out the reloc suffix so
		     the re-parse sees one contiguous expression.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  /* Restore the original input text.  */
		  memcpy (base, save_buf, p - base);

		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3956
3957 /* Emit an expression containing a 32-bit thumb instruction.
3958 Implementation based on put_thumb32_insn. */
3959
3960 static void
3961 emit_thumb32_expr (expressionS * exp)
3962 {
3963 expressionS exp_high = *exp;
3964
3965 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3966 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3967 exp->X_add_number &= 0xffff;
3968 emit_expr (exp, (unsigned int) THUMB_SIZE);
3969 }
3970
/* Guess the instruction size based on the opcode.  Encodings below
   0xe800 are 16-bit; full 32-bit words at or above 0xe8000000 are
   32-bit; anything in between is ambiguous and yields 0.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int value = (unsigned int) opcode;

  if (value >= 0xe8000000u)
    return 4;
  if (value < 0xe800u)
    return 2;
  return 0;
}
3983
/* Emit one ".inst" operand EXP as an instruction of NBYTES (0 means
   guess from the opcode in Thumb mode).  Updates the IT/prediction
   state machine around the emission.  Returns TRUE on success, FALSE
   if a diagnostic was issued.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      if (size == 0)
	/* No explicit width suffix: infer 2 or 4 from the opcode.  */
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the predication state machine consistent: inside
		 an automatic block the instruction counts as outside
		 a predicated region, otherwise as neutral.  */
	      if (now_pred.state == AUTOMATIC_PRED_BLOCK)
		set_pred_insn_type_nonvoid (OUTSIDE_PRED_INSN, 0);
	      else
		set_pred_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		/* 32-bit Thumb goes out as two halfwords.  */
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
4028
4029 /* Like s_arm_elf_cons but do not use md_cons_align and
4030 set the mapping state to MAP_ARM/MAP_THUMB. */
4031
4032 static void
4033 s_arm_elf_inst (int nbytes)
4034 {
4035 if (is_it_end_of_statement ())
4036 {
4037 demand_empty_rest_of_line ();
4038 return;
4039 }
4040
4041 /* Calling mapping_state () here will not change ARM/THUMB,
4042 but will ensure not to be in DATA state. */
4043
4044 if (thumb_mode)
4045 mapping_state (MAP_THUMB);
4046 else
4047 {
4048 if (nbytes != 0)
4049 {
4050 as_bad (_("width suffixes are invalid in ARM mode"));
4051 ignore_rest_of_line ();
4052 return;
4053 }
4054
4055 nbytes = 4;
4056
4057 mapping_state (MAP_ARM);
4058 }
4059
4060 do
4061 {
4062 expressionS exp;
4063
4064 expression (& exp);
4065
4066 if (! emit_insn (& exp, nbytes))
4067 {
4068 ignore_rest_of_line ();
4069 return;
4070 }
4071 }
4072 while (*input_line_pointer++ == ',');
4073
4074 /* Put terminator back into stream. */
4075 input_line_pointer --;
4076 demand_empty_rest_of_line ();
4077 }
4078
4079 /* Parse a .rel31 directive. */
4080
4081 static void
4082 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
4083 {
4084 expressionS exp;
4085 char *p;
4086 valueT highbit;
4087
4088 highbit = 0;
4089 if (*input_line_pointer == '1')
4090 highbit = 0x80000000;
4091 else if (*input_line_pointer != '0')
4092 as_bad (_("expected 0 or 1"));
4093
4094 input_line_pointer++;
4095 if (*input_line_pointer != ',')
4096 as_bad (_("missing comma"));
4097 input_line_pointer++;
4098
4099 #ifdef md_flush_pending_output
4100 md_flush_pending_output ();
4101 #endif
4102
4103 #ifdef md_cons_align
4104 md_cons_align (4);
4105 #endif
4106
4107 mapping_state (MAP_DATA);
4108
4109 expression (&exp);
4110
4111 p = frag_more (4);
4112 md_number_to_chars (p, highbit, 4);
4113 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
4114 BFD_RELOC_ARM_PREL31);
4115
4116 demand_empty_rest_of_line ();
4117 }
4118
4119 /* Directives: AEABI stack-unwind tables. */
4120
/* Parse an unwind_fnstart directive.  Simply records the current location.
   Also resets all per-function unwind state; nested .fnstart
   directives are rejected.  */

static void
s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (unwind.proc_start)
    {
      as_bad (_("duplicate .fnstart directive"));
      return;
    }

  /* Mark the start of the function.  */
  unwind.proc_start = expr_build_dot ();

  /* Reset the rest of the unwind info.	 */
  unwind.opcode_count = 0;
  unwind.table_entry = NULL;
  unwind.personality_routine = NULL;
  unwind.personality_index = -1;
  unwind.frame_size = 0;
  unwind.fp_offset = 0;
  unwind.fp_reg = REG_SP;
  unwind.fp_used = 0;
  unwind.sp_restored = 0;
}
4147
4148
/* Parse a handlerdata directive.  Creates the exception handling table entry
   for the function.  Must appear between .fnstart and .fnend, at most
   once.  */

static void
s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.table_entry)
    as_bad (_("duplicate .handlerdata directive"));

  create_unwind_entry (1);
}
4164
/* Parse an unwind_fnend directive.  Generates the index table entry.
   The entry is two words: a PREL31 self-relative offset to the
   function start, and either an inline unwind-opcode word or a PREL31
   offset to the separate table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    /* .handlerdata already created the table entry.  */
    val = 0;

  /* Add index table entry.  This is two words.	 */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size BFD_RELOC_NONE fix just records the reference.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.	 */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
4234
4235
/* Parse an unwind_cantunwind directive.  Marks the current function
   as having no unwind information (personality index -2); mutually
   exclusive with .personality / .personalityindex.  */

static void
s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("personality routine specified for cantunwind frame"));

  unwind.personality_index = -2;
}
4250
4251
4252 /* Parse a personalityindex directive. */
4253
4254 static void
4255 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
4256 {
4257 expressionS exp;
4258
4259 if (!unwind.proc_start)
4260 as_bad (MISSING_FNSTART);
4261
4262 if (unwind.personality_routine || unwind.personality_index != -1)
4263 as_bad (_("duplicate .personalityindex directive"));
4264
4265 expression (&exp);
4266
4267 if (exp.X_op != O_constant
4268 || exp.X_add_number < 0 || exp.X_add_number > 15)
4269 {
4270 as_bad (_("bad personality routine number"));
4271 ignore_rest_of_line ();
4272 return;
4273 }
4274
4275 unwind.personality_index = exp.X_add_number;
4276
4277 demand_empty_rest_of_line ();
4278 }
4279
4280
/* Parse a personality directive: record a named (possibly quoted)
   personality routine symbol for the current frame.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    /* Step past the closing quote of a quoted symbol name.  */
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  /* Restore the delimiter get_symbol_name overwrote with NUL.  */
  *p = c;
  demand_empty_rest_of_line ();
}
4302
4303
/* Parse a directive saving core registers.  Encodes the register
   list as EHABI pop opcodes (short form 0xa0/0xa8 for a run starting
   at r4, long forms 0x8000.. / 0xb100.. otherwise) and accounts for
   the pushed bytes in the frame size.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer, REGLIST_RN);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.	 */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4379
4380
/* Parse a directive saving FPA registers.  REG is the first register
   of the block; the operand after the comma gives the count (1-4).
   FPA registers are 12 bytes each.  */

static void
s_arm_unwind_save_fpa (int reg)
{
  expressionS exp;
  int num_regs;
  valueT op;

  /* Get Number of registers to transfer.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    expression (&exp);
  else
    exp.X_op = O_illegal;

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected , <constant>"));
      ignore_rest_of_line ();
      return;
    }

  num_regs = exp.X_add_number;

  if (num_regs < 1 || num_regs > 4)
    {
      as_bad (_("number of registers must be in the range [1:4]"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 4)
    {
      /* Short form.  */
      op = 0xb4 | (num_regs - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xc800 | (reg << 4) | (num_regs - 1);
      add_unwind_opcode (op, 2);
    }
  unwind.frame_size += num_regs * 12;
}
4428
4429
/* Parse a directive saving VFP registers for ARMv6 and above.
   D registers 16-31 (VFPv3) need a separate opcode from registers
   0-15, so a list straddling d16 is split into two opcodes.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;
  bfd_boolean partial_match;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D,
			      &partial_match);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* Opcode encodes the offset from d16.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register is 8 bytes.  */
  unwind.frame_size += count * 8;
}
4480
4481
4482 /* Parse a directive saving VFP registers for pre-ARMv6. */
4483
4484 static void
4485 s_arm_unwind_save_vfp (void)
4486 {
4487 int count;
4488 unsigned int reg;
4489 valueT op;
4490 bfd_boolean partial_match;
4491
4492 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D,
4493 &partial_match);
4494 if (count == FAIL)
4495 {
4496 as_bad (_("expected register list"));
4497 ignore_rest_of_line ();
4498 return;
4499 }
4500
4501 demand_empty_rest_of_line ();
4502
4503 if (reg == 8)
4504 {
4505 /* Short form. */
4506 op = 0xb8 | (count - 1);
4507 add_unwind_opcode (op, 1);
4508 }
4509 else
4510 {
4511 /* Long form. */
4512 op = 0xb300 | (reg << 4) | (count - 1);
4513 add_unwind_opcode (op, 2);
4514 }
4515 unwind.frame_size += count * 8 + 4;
4516 }
4517
4518
4519 /* Parse a directive saving iWMMXt data registers. */
4520
4521 static void
4522 s_arm_unwind_save_mmxwr (void)
4523 {
4524 int reg;
4525 int hi_reg;
4526 int i;
4527 unsigned mask = 0;
4528 valueT op;
4529
4530 if (*input_line_pointer == '{')
4531 input_line_pointer++;
4532
4533 do
4534 {
4535 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4536
4537 if (reg == FAIL)
4538 {
4539 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4540 goto error;
4541 }
4542
4543 if (mask >> reg)
4544 as_tsktsk (_("register list not in ascending order"));
4545 mask |= 1 << reg;
4546
4547 if (*input_line_pointer == '-')
4548 {
4549 input_line_pointer++;
4550 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4551 if (hi_reg == FAIL)
4552 {
4553 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4554 goto error;
4555 }
4556 else if (reg >= hi_reg)
4557 {
4558 as_bad (_("bad register range"));
4559 goto error;
4560 }
4561 for (; reg < hi_reg; reg++)
4562 mask |= 1 << reg;
4563 }
4564 }
4565 while (skip_past_comma (&input_line_pointer) != FAIL);
4566
4567 skip_past_char (&input_line_pointer, '}');
4568
4569 demand_empty_rest_of_line ();
4570
4571 /* Generate any deferred opcodes because we're going to be looking at
4572 the list. */
4573 flush_pending_unwind ();
4574
4575 for (i = 0; i < 16; i++)
4576 {
4577 if (mask & (1 << i))
4578 unwind.frame_size += 8;
4579 }
4580
4581 /* Attempt to combine with a previous opcode. We do this because gcc
4582 likes to output separate unwind directives for a single block of
4583 registers. */
4584 if (unwind.opcode_count > 0)
4585 {
4586 i = unwind.opcodes[unwind.opcode_count - 1];
4587 if ((i & 0xf8) == 0xc0)
4588 {
4589 i &= 7;
4590 /* Only merge if the blocks are contiguous. */
4591 if (i < 6)
4592 {
4593 if ((mask & 0xfe00) == (1 << 9))
4594 {
4595 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4596 unwind.opcode_count--;
4597 }
4598 }
4599 else if (i == 6 && unwind.opcode_count >= 2)
4600 {
4601 i = unwind.opcodes[unwind.opcode_count - 2];
4602 reg = i >> 4;
4603 i &= 0xf;
4604
4605 op = 0xffff << (reg - 1);
4606 if (reg > 0
4607 && ((mask & op) == (1u << (reg - 1))))
4608 {
4609 op = (1 << (reg + i + 1)) - 1;
4610 op &= ~((1 << reg) - 1);
4611 mask |= op;
4612 unwind.opcode_count -= 2;
4613 }
4614 }
4615 }
4616 }
4617
4618 hi_reg = 15;
4619 /* We want to generate opcodes in the order the registers have been
4620 saved, ie. descending order. */
4621 for (reg = 15; reg >= -1; reg--)
4622 {
4623 /* Save registers in blocks. */
4624 if (reg < 0
4625 || !(mask & (1 << reg)))
4626 {
4627 /* We found an unsaved reg. Generate opcodes to save the
4628 preceding block. */
4629 if (reg != hi_reg)
4630 {
4631 if (reg == 9)
4632 {
4633 /* Short form. */
4634 op = 0xc0 | (hi_reg - 10);
4635 add_unwind_opcode (op, 1);
4636 }
4637 else
4638 {
4639 /* Long form. */
4640 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4641 add_unwind_opcode (op, 2);
4642 }
4643 }
4644 hi_reg = reg - 1;
4645 }
4646 }
4647
4648 return;
4649 error:
4650 ignore_rest_of_line ();
4651 }
4652
4653 static void
4654 s_arm_unwind_save_mmxwcg (void)
4655 {
4656 int reg;
4657 int hi_reg;
4658 unsigned mask = 0;
4659 valueT op;
4660
4661 if (*input_line_pointer == '{')
4662 input_line_pointer++;
4663
4664 skip_whitespace (input_line_pointer);
4665
4666 do
4667 {
4668 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4669
4670 if (reg == FAIL)
4671 {
4672 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4673 goto error;
4674 }
4675
4676 reg -= 8;
4677 if (mask >> reg)
4678 as_tsktsk (_("register list not in ascending order"));
4679 mask |= 1 << reg;
4680
4681 if (*input_line_pointer == '-')
4682 {
4683 input_line_pointer++;
4684 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4685 if (hi_reg == FAIL)
4686 {
4687 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4688 goto error;
4689 }
4690 else if (reg >= hi_reg)
4691 {
4692 as_bad (_("bad register range"));
4693 goto error;
4694 }
4695 for (; reg < hi_reg; reg++)
4696 mask |= 1 << reg;
4697 }
4698 }
4699 while (skip_past_comma (&input_line_pointer) != FAIL);
4700
4701 skip_past_char (&input_line_pointer, '}');
4702
4703 demand_empty_rest_of_line ();
4704
4705 /* Generate any deferred opcodes because we're going to be looking at
4706 the list. */
4707 flush_pending_unwind ();
4708
4709 for (reg = 0; reg < 16; reg++)
4710 {
4711 if (mask & (1 << reg))
4712 unwind.frame_size += 4;
4713 }
4714 op = 0xc700 | mask;
4715 add_unwind_opcode (op, 2);
4716 return;
4717 error:
4718 ignore_rest_of_line ();
4719 }
4720
4721
4722 /* Parse an unwind_save directive.
4723 If the argument is non-zero, this is a .vsave directive. */
4724
4725 static void
4726 s_arm_unwind_save (int arch_v6)
4727 {
4728 char *peek;
4729 struct reg_entry *reg;
4730 bfd_boolean had_brace = FALSE;
4731
4732 if (!unwind.proc_start)
4733 as_bad (MISSING_FNSTART);
4734
4735 /* Figure out what sort of save we have. */
4736 peek = input_line_pointer;
4737
4738 if (*peek == '{')
4739 {
4740 had_brace = TRUE;
4741 peek++;
4742 }
4743
4744 reg = arm_reg_parse_multi (&peek);
4745
4746 if (!reg)
4747 {
4748 as_bad (_("register expected"));
4749 ignore_rest_of_line ();
4750 return;
4751 }
4752
4753 switch (reg->type)
4754 {
4755 case REG_TYPE_FN:
4756 if (had_brace)
4757 {
4758 as_bad (_("FPA .unwind_save does not take a register list"));
4759 ignore_rest_of_line ();
4760 return;
4761 }
4762 input_line_pointer = peek;
4763 s_arm_unwind_save_fpa (reg->number);
4764 return;
4765
4766 case REG_TYPE_RN:
4767 s_arm_unwind_save_core ();
4768 return;
4769
4770 case REG_TYPE_VFD:
4771 if (arch_v6)
4772 s_arm_unwind_save_vfp_armv6 ();
4773 else
4774 s_arm_unwind_save_vfp ();
4775 return;
4776
4777 case REG_TYPE_MMXWR:
4778 s_arm_unwind_save_mmxwr ();
4779 return;
4780
4781 case REG_TYPE_MMXWCG:
4782 s_arm_unwind_save_mmxwcg ();
4783 return;
4784
4785 default:
4786 as_bad (_(".unwind_save does not support this kind of register"));
4787 ignore_rest_of_line ();
4788 }
4789 }
4790
4791
4792 /* Parse an unwind_movsp directive. */
4793
4794 static void
4795 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4796 {
4797 int reg;
4798 valueT op;
4799 int offset;
4800
4801 if (!unwind.proc_start)
4802 as_bad (MISSING_FNSTART);
4803
4804 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4805 if (reg == FAIL)
4806 {
4807 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4808 ignore_rest_of_line ();
4809 return;
4810 }
4811
4812 /* Optional constant. */
4813 if (skip_past_comma (&input_line_pointer) != FAIL)
4814 {
4815 if (immediate_for_directive (&offset) == FAIL)
4816 return;
4817 }
4818 else
4819 offset = 0;
4820
4821 demand_empty_rest_of_line ();
4822
4823 if (reg == REG_SP || reg == REG_PC)
4824 {
4825 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4826 return;
4827 }
4828
4829 if (unwind.fp_reg != REG_SP)
4830 as_bad (_("unexpected .unwind_movsp directive"));
4831
4832 /* Generate opcode to restore the value. */
4833 op = 0x90 | reg;
4834 add_unwind_opcode (op, 1);
4835
4836 /* Record the information for later. */
4837 unwind.fp_reg = reg;
4838 unwind.fp_offset = unwind.frame_size - offset;
4839 unwind.sp_restored = 1;
4840 }
4841
4842 /* Parse an unwind_pad directive. */
4843
4844 static void
4845 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4846 {
4847 int offset;
4848
4849 if (!unwind.proc_start)
4850 as_bad (MISSING_FNSTART);
4851
4852 if (immediate_for_directive (&offset) == FAIL)
4853 return;
4854
4855 if (offset & 3)
4856 {
4857 as_bad (_("stack increment must be multiple of 4"));
4858 ignore_rest_of_line ();
4859 return;
4860 }
4861
4862 /* Don't generate any opcodes, just record the details for later. */
4863 unwind.frame_size += offset;
4864 unwind.pending_offset += offset;
4865
4866 demand_empty_rest_of_line ();
4867 }
4868
4869 /* Parse an unwind_setfp directive. */
4870
4871 static void
4872 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4873 {
4874 int sp_reg;
4875 int fp_reg;
4876 int offset;
4877
4878 if (!unwind.proc_start)
4879 as_bad (MISSING_FNSTART);
4880
4881 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4882 if (skip_past_comma (&input_line_pointer) == FAIL)
4883 sp_reg = FAIL;
4884 else
4885 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4886
4887 if (fp_reg == FAIL || sp_reg == FAIL)
4888 {
4889 as_bad (_("expected <reg>, <reg>"));
4890 ignore_rest_of_line ();
4891 return;
4892 }
4893
4894 /* Optional constant. */
4895 if (skip_past_comma (&input_line_pointer) != FAIL)
4896 {
4897 if (immediate_for_directive (&offset) == FAIL)
4898 return;
4899 }
4900 else
4901 offset = 0;
4902
4903 demand_empty_rest_of_line ();
4904
4905 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4906 {
4907 as_bad (_("register must be either sp or set by a previous"
4908 "unwind_movsp directive"));
4909 return;
4910 }
4911
4912 /* Don't generate any opcodes, just record the information for later. */
4913 unwind.fp_reg = fp_reg;
4914 unwind.fp_used = 1;
4915 if (sp_reg == REG_SP)
4916 unwind.fp_offset = unwind.frame_size - offset;
4917 else
4918 unwind.fp_offset -= offset;
4919 }
4920
4921 /* Parse an unwind_raw directive. */
4922
4923 static void
4924 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4925 {
4926 expressionS exp;
4927 /* This is an arbitrary limit. */
4928 unsigned char op[16];
4929 int count;
4930
4931 if (!unwind.proc_start)
4932 as_bad (MISSING_FNSTART);
4933
4934 expression (&exp);
4935 if (exp.X_op == O_constant
4936 && skip_past_comma (&input_line_pointer) != FAIL)
4937 {
4938 unwind.frame_size += exp.X_add_number;
4939 expression (&exp);
4940 }
4941 else
4942 exp.X_op = O_illegal;
4943
4944 if (exp.X_op != O_constant)
4945 {
4946 as_bad (_("expected <offset>, <opcode>"));
4947 ignore_rest_of_line ();
4948 return;
4949 }
4950
4951 count = 0;
4952
4953 /* Parse the opcode. */
4954 for (;;)
4955 {
4956 if (count >= 16)
4957 {
4958 as_bad (_("unwind opcode too long"));
4959 ignore_rest_of_line ();
4960 }
4961 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4962 {
4963 as_bad (_("invalid unwind opcode"));
4964 ignore_rest_of_line ();
4965 return;
4966 }
4967 op[count++] = exp.X_add_number;
4968
4969 /* Parse the next byte. */
4970 if (skip_past_comma (&input_line_pointer) == FAIL)
4971 break;
4972
4973 expression (&exp);
4974 }
4975
4976 /* Add the opcode bytes in reverse order. */
4977 while (count--)
4978 add_unwind_opcode (op[count], 1);
4979
4980 demand_empty_rest_of_line ();
4981 }
4982
4983
4984 /* Parse a .eabi_attribute directive. */
4985
4986 static void
4987 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4988 {
4989 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4990
4991 if (tag >= 0 && tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4992 attributes_set_explicitly[tag] = 1;
4993 }
4994
4995 /* Emit a tls fix for the symbol. */
4996
4997 static void
4998 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
4999 {
5000 char *p;
5001 expressionS exp;
5002 #ifdef md_flush_pending_output
5003 md_flush_pending_output ();
5004 #endif
5005
5006 #ifdef md_cons_align
5007 md_cons_align (4);
5008 #endif
5009
5010 /* Since we're just labelling the code, there's no need to define a
5011 mapping symbol. */
5012 expression (&exp);
5013 p = obstack_next_free (&frchain_now->frch_obstack);
5014 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
5015 thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
5016 : BFD_RELOC_ARM_TLS_DESCSEQ);
5017 }
5018 #endif /* OBJ_ELF */
5019
5020 static void s_arm_arch (int);
5021 static void s_arm_object_arch (int);
5022 static void s_arm_cpu (int);
5023 static void s_arm_fpu (int);
5024 static void s_arm_arch_extension (int);
5025
5026 #ifdef TE_PE
5027
5028 static void
5029 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
5030 {
5031 expressionS exp;
5032
5033 do
5034 {
5035 expression (&exp);
5036 if (exp.X_op == O_symbol)
5037 exp.X_op = O_secrel;
5038
5039 emit_expr (&exp, 4);
5040 }
5041 while (*input_line_pointer++ == ',');
5042
5043 input_line_pointer--;
5044 demand_empty_rest_of_line ();
5045 }
5046 #endif /* TE_PE */
5047
5048 int
5049 arm_is_largest_exponent_ok (int precision)
5050 {
5051 /* precision == 1 ensures that this will only return
5052 true for 16 bit floats. */
5053 return (precision == 1) && (fp16_format == ARM_FP16_FORMAT_ALTERNATIVE);
5054 }
5055
5056 static void
5057 set_fp16_format (int dummy ATTRIBUTE_UNUSED)
5058 {
5059 char saved_char;
5060 char* name;
5061 enum fp_16bit_format new_format;
5062
5063 new_format = ARM_FP16_FORMAT_DEFAULT;
5064
5065 name = input_line_pointer;
5066 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
5067 input_line_pointer++;
5068
5069 saved_char = *input_line_pointer;
5070 *input_line_pointer = 0;
5071
5072 if (strcasecmp (name, "ieee") == 0)
5073 new_format = ARM_FP16_FORMAT_IEEE;
5074 else if (strcasecmp (name, "alternative") == 0)
5075 new_format = ARM_FP16_FORMAT_ALTERNATIVE;
5076 else
5077 {
5078 as_bad (_("unrecognised float16 format \"%s\""), name);
5079 goto cleanup;
5080 }
5081
5082 /* Only set fp16_format if it is still the default (aka not already
5083 been set yet). */
5084 if (fp16_format == ARM_FP16_FORMAT_DEFAULT)
5085 fp16_format = new_format;
5086 else
5087 {
5088 if (new_format != fp16_format)
5089 as_warn (_("float16 format cannot be set more than once, ignoring."));
5090 }
5091
5092 cleanup:
5093 *input_line_pointer = saved_char;
5094 ignore_rest_of_line ();
5095 }
5096
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",         s_req,         0 },
  /* Following two are likewise never called.  */
  { "dn",          s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",       s_unreq,       0 },
  { "bss",         s_bss,         0 },
  { "align",       s_align_ptwo,  2 },
  /* Instruction set / state selection.  */
  { "arm",         s_arm,         0 },
  { "thumb",       s_thumb,       0 },
  { "code",        s_code,        0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,   0 },
  { "even",        s_even,        0 },
  /* Literal pool management.  */
  { "ltorg",       s_ltorg,       0 },
  { "pool",        s_ltorg,       0 },
  { "syntax",      s_syntax,      0 },
  /* Target selection.  */
  { "cpu",         s_arm_cpu,     0 },
  { "arch",        s_arm_arch,    0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu",         s_arm_fpu,     0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word",        s_arm_elf_cons, 4 },
  { "long",        s_arm_elf_cons, 4 },
  { "inst.n",      s_arm_elf_inst, 2 },
  { "inst.w",      s_arm_elf_inst, 4 },
  { "inst",        s_arm_elf_inst, 0 },
  { "rel31",       s_arm_rel31,   0 },
  /* ARM EHABI unwind table directives.  */
  { "fnstart",     s_arm_unwind_fnstart,  0 },
  { "fnend",       s_arm_unwind_fnend,    0 },
  { "cantunwind",  s_arm_unwind_cantunwind, 0 },
  { "personality", s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata", s_arm_unwind_handlerdata, 0 },
  { "save",        s_arm_unwind_save,     0 },
  { "vsave",       s_arm_unwind_save,     1 },
  { "movsp",       s_arm_unwind_movsp,    0 },
  { "pad",         s_arm_unwind_pad,      0 },
  { "setfp",       s_arm_unwind_setfp,    0 },
  { "unwind_raw",  s_arm_unwind_raw,      0 },
  { "eabi_attribute", s_arm_eabi_attribute, 0 },
  { "tlsdescseq",  s_arm_tls_descseq,      0 },
#else
  { "word",        cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",      float_cons, 'x' },
  { "ldouble",     float_cons, 'x' },
  { "packed",      float_cons, 'p' },
  { "bfloat16",    float_cons, 'b' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  {"float16", float_cons, 'h' },
  {"float16_format", set_fp16_format, 0 },

  { 0, 0, 0 }
};
5180
5181 /* Parser functions used exclusively in instruction operands. */
5182
5183 /* Generic immediate-value read function for use in insn parsing.
5184 STR points to the beginning of the immediate (the leading #);
5185 VAL receives the value; if the value is outside [MIN, MAX]
5186 issue an error. PREFIX_OPT is true if the immediate prefix is
5187 optional. */
5188
5189 static int
5190 parse_immediate (char **str, int *val, int min, int max,
5191 bfd_boolean prefix_opt)
5192 {
5193 expressionS exp;
5194
5195 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
5196 if (exp.X_op != O_constant)
5197 {
5198 inst.error = _("constant expression required");
5199 return FAIL;
5200 }
5201
5202 if (exp.X_add_number < min || exp.X_add_number > max)
5203 {
5204 inst.error = _("immediate value out of range");
5205 return FAIL;
5206 }
5207
5208 *val = exp.X_add_number;
5209 return SUCCESS;
5210 }
5211
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i].

   STR points at the operand text and is advanced past it on SUCCESS.
   IN_EXP, when non-NULL, receives the parsed expression instead of a
   local temporary.  ALLOW_SYMBOL_P lets a bare symbol succeed (nothing
   is stored in that case).  Returns SUCCESS or FAIL.  */

static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      /* Low 32 bits of the constant go in imm.  */
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into imm and the next 32 into reg.  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
5284
/* Returns the pseudo-register number of an FPA immediate constant,
   or FAIL if there isn't a valid constant here.

   On success *STR is advanced past the constant and the return value
   is I + 8 for the matching entry I of the fp_values table (the + 8
   offset maps it into the FPA immediate pseudo-register range).  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  /* Only accept the match when nothing else follows on the line;
	     otherwise restore *str and keep looking.  */
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA value.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* Restore input_line_pointer before reporting failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
5377
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  /* If bit 29 (the top 'b' bit) is set, bits 30-25 must read 011111;
     otherwise they must read 100000.  */
  unsigned expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;
  return ((imm & 0x7e000000) ^ expected) == 0;
}
5387
5388
/* Detect the presence of a floating point or integer zero constant,
   i.e. #0.0 or #0.

   *IN is advanced past the constant when one is recognised.  Returns
   TRUE on a zero constant, FALSE otherwise.  */

static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    {
      /* In unified syntax, all prefixes are optional.  */
      if (!unified_syntax)
	return FALSE;
    }
  else
    ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      int val;
      /* Bounds [0, 0] mean only the value zero can succeed.  */
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
	return FALSE;
      return TRUE;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
			     &generic_floating_point_number);

  /* NOTE(review): low > leader appears to be atof_generic's way of
     indicating an all-zero significand, i.e. a (positive) zero value
     — confirm against atof_generic's contract.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
	  > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
5426
5427 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5428 0baBbbbbbc defgh000 00000000 00000000.
5429 The zero and minus-zero cases need special handling, since they can't be
5430 encoded in the "quarter-precision" float format, but can nonetheless be
5431 loaded as integer constants. */
5432
5433 static unsigned
5434 parse_qfloat_immediate (char **ccp, int *immed)
5435 {
5436 char *str = *ccp;
5437 char *fpnum;
5438 LITTLENUM_TYPE words[MAX_LITTLENUMS];
5439 int found_fpchar = 0;
5440
5441 skip_past_char (&str, '#');
5442
5443 /* We must not accidentally parse an integer as a floating-point number. Make
5444 sure that the value we parse is not an integer by checking for special
5445 characters '.' or 'e'.
5446 FIXME: This is a horrible hack, but doing better is tricky because type
5447 information isn't in a very usable state at parse time. */
5448 fpnum = str;
5449 skip_whitespace (fpnum);
5450
5451 if (strncmp (fpnum, "0x", 2) == 0)
5452 return FAIL;
5453 else
5454 {
5455 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
5456 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
5457 {
5458 found_fpchar = 1;
5459 break;
5460 }
5461
5462 if (!found_fpchar)
5463 return FAIL;
5464 }
5465
5466 if ((str = atof_ieee (str, 's', words)) != NULL)
5467 {
5468 unsigned fpword = 0;
5469 int i;
5470
5471 /* Our FP word must be 32 bits (single-precision FP). */
5472 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
5473 {
5474 fpword <<= LITTLENUM_NUMBER_OF_BITS;
5475 fpword |= words[i];
5476 }
5477
5478 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
5479 *immed = fpword;
5480 else
5481 return FAIL;
5482
5483 *ccp = str;
5484
5485 return SUCCESS;
5486 }
5487
5488 return FAIL;
5489 }
5490
/* Shift operands.  */

/* The kinds of shift written in ARM/Thumb source; ASL is folded into
   SHIFT_LSL during parsing.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX, SHIFT_UXTW
};

/* Maps a textual shift name onto its SHIFT_* kind; entries populate
   the arm_shift_hsh hash table used by parse_shift.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
  SHIFT_UXTW_IMMEDIATE		/* Shift must be UXTW immediate.  */
};
5513
/* Parse a <shift> specifier on an ARM data processing instruction.
   This has three forms:

   (LSL|LSR|ASL|ASR|ROR) Rs
   (LSL|LSR|ASL|ASR|ROR) #imm
   RRX

   Note that ASL is assimilated to LSL in the instruction encoding, and
   RRX to ROR #0 (which cannot be written as such).

   STR points at the shift text and is advanced past it on success.
   I indexes inst.operands[] where the result is stored; an immediate
   amount lands in inst.relocs[0].exp instead.  MODE restricts which
   shift kinds are accepted.  Returns SUCCESS or FAIL (with inst.error
   set).  */

static int
parse_shift (char **str, int i, enum parse_shift_mode mode)
{
  const struct asm_shift_name *shift_name;
  enum shift_kind shift;
  char *s = *str;
  char *p = s;
  int reg;

  /* Scan the alphabetic shift mnemonic.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  /* Look the mnemonic up in the shift-name hash table.  */
  shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
							    p - *str);

  if (shift_name == NULL)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift = shift_name->kind;

  /* Enforce the caller's restriction on which shift kinds are legal.  */
  switch (mode)
    {
    case NO_SHIFT_RESTRICT:
    case SHIFT_IMMEDIATE:
      if (shift == SHIFT_UXTW)
	{
	  inst.error = _("'UXTW' not allowed here");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_OR_ASR_IMMEDIATE:
      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
	{
	  inst.error = _("'LSL' or 'ASR' required");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_IMMEDIATE:
      if (shift != SHIFT_LSL)
	{
	  inst.error = _("'LSL' required");
	  return FAIL;
	}
      break;

    case SHIFT_ASR_IMMEDIATE:
      if (shift != SHIFT_ASR)
	{
	  inst.error = _("'ASR' required");
	  return FAIL;
	}
      break;
    case SHIFT_UXTW_IMMEDIATE:
      if (shift != SHIFT_UXTW)
	{
	  inst.error = _("'UXTW' required");
	  return FAIL;
	}
      break;

    default: abort ();
    }

  /* RRX takes no operand; everything else takes a register (only when
     unrestricted) or an immediate expression.  */
  if (shift != SHIFT_RRX)
    {
      /* Whitespace can appear here if the next thing is a bare digit.  */
      skip_whitespace (p);

      if (mode == NO_SHIFT_RESTRICT
	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;
	}
      else if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
	return FAIL;
    }
  inst.operands[i].shift_kind = shift;
  inst.operands[i].shifted = 1;
  *str = p;
  return SUCCESS;
}
5617
/* Parse a <shifter_operand> for an ARM data processing instruction:

      #<immediate>
      #<immediate>, <rotate>
      <Rm>
      <Rm>, <shift>

   where <shift> is defined by parse_shift above, and <rotate> is a
   multiple of 2 between 0 and 30.  Validation of immediate operands
   is deferred to md_apply_fix.

   STR is advanced past the operand; I indexes inst.operands[].
   Returns SUCCESS or FAIL (with inst.error set).  */

static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  /* Register form, optionally followed by a shift.  */
  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Immediate form; the expression lands in inst.relocs[0].exp.  */
  if (my_get_expression (&inst.relocs[0].exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.relocs[0].exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even value in [0, 30].  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      /* The base constant must fit in 8 bits.  */
      if (inst.relocs[0].exp.X_add_number < 0
	  || inst.relocs[0].exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  */
      inst.operands[i].imm = inst.relocs[0].exp.X_add_number | value << 7;
      return SUCCESS;
    }

  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 0;
  return SUCCESS;
}
5688
5689 /* Group relocation information. Each entry in the table contains the
5690 textual name of the relocation as may appear in assembler source
5691 and must end with a colon.
5692 Along with this textual name are the relocation codes to be used if
5693 the corresponding instruction is an ALU instruction (ADD or SUB only),
5694 an LDR, an LDRS, or an LDC. */
5695
struct group_reloc_table_entry
{
  /* Textual name as written in assembler source (the trailing colon is
     matched by find_group_reloc_table_entry, not stored here).  */
  const char *name;
  /* Relocation code for ALU instructions (ADD or SUB only).  */
  int alu_code;
  /* Relocation code for LDR instructions.  */
  int ldr_code;
  /* Relocation code for LDRS instructions.  */
  int ldrs_code;
  /* Relocation code for LDC instructions.  */
  int ldc_code;
};
5704
typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,	/* Use the ldr_code column of group_reloc_table.  */
  GROUP_LDRS,	/* Use the ldrs_code column.  */
  GROUP_LDC,	/* Use the ldc_code column.  */
  GROUP_MVE	/* MVE addressing; no group relocation code applies.  */
} group_reloc_type;
5714
/* Table searched by find_group_reloc_table_entry.  A zero code in a
   column means that relocation is not available for that instruction
   class (callers report an error when they pick a zero entry).  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5789
5790 /* Given the address of a pointer pointing to the textual name of a group
5791 relocation as may appear in assembler source, attempt to find its details
5792 in group_reloc_table. The pointer will be updated to the character after
5793 the trailing colon. On failure, FAIL will be returned; SUCCESS
5794 otherwise. On success, *entry will be updated to point at the relevant
5795 group_reloc_table entry. */
5796
5797 static int
5798 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5799 {
5800 unsigned int i;
5801 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5802 {
5803 int length = strlen (group_reloc_table[i].name);
5804
5805 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5806 && (*str)[length] == ':')
5807 {
5808 *out = &group_reloc_table[i];
5809 *str += (length + 1);
5810 return SUCCESS;
5811 }
5812 }
5813
5814 return FAIL;
5815 }
5816
5817 /* Parse a <shifter_operand> for an ARM data processing instruction
5818 (as for parse_shifter_operand) where group relocations are allowed:
5819
5820 #<immediate>
5821 #<immediate>, <rotate>
5822 #:<group_reloc>:<expression>
5823 <Rm>
5824 <Rm>, <shift>
5825
5826 where <group_reloc> is one of the strings defined in group_reloc_table.
5827 The hashes are optional.
5828
5829 Everything else is as for parse_shifter_operand. */
5830
5831 static parse_operand_result
5832 parse_shifter_operand_group_reloc (char **str, int i)
5833 {
5834 /* Determine if we have the sequence of characters #: or just :
5835 coming next. If we do, then we check for a group relocation.
5836 If we don't, punt the whole lot to parse_shifter_operand. */
5837
5838 if (((*str)[0] == '#' && (*str)[1] == ':')
5839 || (*str)[0] == ':')
5840 {
5841 struct group_reloc_table_entry *entry;
5842
5843 if ((*str)[0] == '#')
5844 (*str) += 2;
5845 else
5846 (*str)++;
5847
5848 /* Try to parse a group relocation. Anything else is an error. */
5849 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5850 {
5851 inst.error = _("unknown group relocation");
5852 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5853 }
5854
5855 /* We now have the group relocation table entry corresponding to
5856 the name in the assembler source. Next, we parse the expression. */
5857 if (my_get_expression (&inst.relocs[0].exp, str, GE_NO_PREFIX))
5858 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5859
5860 /* Record the relocation type (always the ALU variant here). */
5861 inst.relocs[0].type = (bfd_reloc_code_real_type) entry->alu_code;
5862 gas_assert (inst.relocs[0].type != 0);
5863
5864 return PARSE_OPERAND_SUCCESS;
5865 }
5866 else
5867 return parse_shifter_operand (str, i) == SUCCESS
5868 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5869
5870 /* Never reached. */
5871 }
5872
5873 /* Parse a Neon alignment expression. Information is written to
5874 inst.operands[i]. We assume the initial ':' has been skipped.
5875
5876 align .imm = align << 8, .immisalign=1, .preind=0 */
5877 static parse_operand_result
5878 parse_neon_alignment (char **str, int i)
5879 {
5880 char *p = *str;
5881 expressionS exp;
5882
5883 my_get_expression (&exp, &p, GE_NO_PREFIX);
5884
5885 if (exp.X_op != O_constant)
5886 {
5887 inst.error = _("alignment must be constant");
5888 return PARSE_OPERAND_FAIL;
5889 }
5890
5891 inst.operands[i].imm = exp.X_add_number << 8;
5892 inst.operands[i].immisalign = 1;
5893 /* Alignments are not pre-indexes. */
5894 inst.operands[i].preind = 0;
5895
5896 *str = p;
5897 return PARSE_OPERAND_SUCCESS;
5898 }
5899
5900 /* Parse all forms of an ARM address expression. Information is written
5901 to inst.operands[i] and/or inst.relocs[0].
5902
5903 Preindexed addressing (.preind=1):
5904
5905 [Rn, #offset] .reg=Rn .relocs[0].exp=offset
5906 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5907 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5908 .shift_kind=shift .relocs[0].exp=shift_imm
5909
5910 These three may have a trailing ! which causes .writeback to be set also.
5911
5912 Postindexed addressing (.postind=1, .writeback=1):
5913
5914 [Rn], #offset .reg=Rn .relocs[0].exp=offset
5915 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5916 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5917 .shift_kind=shift .relocs[0].exp=shift_imm
5918
5919 Unindexed addressing (.preind=0, .postind=0):
5920
5921 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5922
5923 Other:
5924
5925 [Rn]{!} shorthand for [Rn,#0]{!}
5926 =immediate .isreg=0 .relocs[0].exp=immediate
5927 label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5928
5929 It is the caller's responsibility to check for addressing modes not
5930 supported by the instruction, and to set inst.relocs[0].type. */
5931
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No '[': either "=immediate" or a bare label/address.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.relocs[0].pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.relocs[0].exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.relocs[0].exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* Parse the base register; MVE addressing also accepts an MQ vector
     register here.  */
  if (group_type == GROUP_MVE)
    {
      enum arm_reg_type rtype = REG_TYPE_MQ;
      struct neon_type_el et;
      if ((reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	{
	  inst.operands[i].isquad = 1;
	}
      else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
	{
	  inst.error = BAD_ADDR_MODE;
	  return PARSE_OPERAND_FAIL;
	}
    }
  else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      if (group_type == GROUP_MVE)
	inst.error = BAD_ADDR_MODE;
      else
	inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* Something follows the base register inside the brackets: an index
     register, an alignment specifier, a group relocation, or an
     immediate offset.  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      enum arm_reg_type rtype = REG_TYPE_MQ;
      struct neon_type_el et;
      if (group_type == GROUP_MVE
	  && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	{
	  /* MVE vector index register, optionally with a UXTW shift whose
	     amount is packed into .imm above bit 5.  */
	  inst.operands[i].immisreg = 2;
	  inst.operands[i].imm = reg;

	  if (skip_past_comma (&p) == SUCCESS)
	    {
	      if (parse_shift (&p, i, SHIFT_UXTW_IMMEDIATE) == SUCCESS)
		{
		  inst.operands[i].imm |= inst.relocs[0].exp.X_add_number << 5;
		  inst.relocs[0].exp.X_add_number = 0;
		}
	      else
		return PARSE_OPERAND_FAIL;
	    }
	}
      else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [Rn, +/-Rm] with an optional shift.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* Not a register: back up over any '-' we consumed so the
	     expression parser sees the sign.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		case GROUP_LDR:
		  inst.relocs[0].type
		    = (bfd_reloc_code_real_type) entry->ldr_code;
		  break;

		case GROUP_LDRS:
		  inst.relocs[0].type
		    = (bfd_reloc_code_real_type) entry->ldrs_code;
		  break;

		case GROUP_LDC:
		  inst.relocs[0].type
		    = (bfd_reloc_code_real_type) entry->ldc_code;
		  break;

		default:
		  gas_assert (0);
		}

	      /* A zero table entry means this relocation class is not
		 available for this instruction.  */
	      if (inst.relocs[0].type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;

	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  /* After the brackets: '!' (writeback) or a post-index/option field.  */
  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  enum arm_reg_type rtype = REG_TYPE_MQ;
	  struct neon_type_el et;
	  if (group_type == GROUP_MVE
	      && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	    {
	      inst.operands[i].immisreg = 2;
	      inst.operands[i].imm = reg;
	    }
	  else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already.  If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;

	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
6232
6233 static int
6234 parse_address (char **str, int i)
6235 {
6236 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
6237 ? SUCCESS : FAIL;
6238 }
6239
6240 static parse_operand_result
6241 parse_address_group_reloc (char **str, int i, group_reloc_type type)
6242 {
6243 return parse_address_main (str, i, 1, type);
6244 }
6245
6246 /* Parse an operand for a MOVW or MOVT instruction. */
6247 static int
6248 parse_half (char **str)
6249 {
6250 char * p;
6251
6252 p = *str;
6253 skip_past_char (&p, '#');
6254 if (strncasecmp (p, ":lower16:", 9) == 0)
6255 inst.relocs[0].type = BFD_RELOC_ARM_MOVW;
6256 else if (strncasecmp (p, ":upper16:", 9) == 0)
6257 inst.relocs[0].type = BFD_RELOC_ARM_MOVT;
6258
6259 if (inst.relocs[0].type != BFD_RELOC_UNUSED)
6260 {
6261 p += 9;
6262 skip_whitespace (p);
6263 }
6264
6265 if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
6266 return FAIL;
6267
6268 if (inst.relocs[0].type == BFD_RELOC_UNUSED)
6269 {
6270 if (inst.relocs[0].exp.X_op != O_constant)
6271 {
6272 inst.error = _("constant expression expected");
6273 return FAIL;
6274 }
6275 if (inst.relocs[0].exp.X_add_number < 0
6276 || inst.relocs[0].exp.X_add_number > 0xffff)
6277 {
6278 inst.error = _("immediate value out of range");
6279 return FAIL;
6280 }
6281 }
6282 *str = p;
6283 return SUCCESS;
6284 }
6285
6286 /* Miscellaneous. */
6287
6288 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
6289 or a bitmask suitable to be or-ed into the ARM msr instruction. */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile: look up the register name (a run of alphanumerics
	 and underscores) in the v7m PSR hash table.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the 4-character register name matched above.  */
  p += 4;
 check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* Each letter sets its own bit; 0x20 marks a duplicated
	     letter, which is rejected below.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v, q present maps to the flags field.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      /* The 'g' bit requires the DSP extension.  */
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated letters and partial nzcvq sets.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* Non-APSR suffix: look up the field name (e.g. "cx", "fsx")
	     in the PSR hash table.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6484
6485 static int
6486 parse_sys_vldr_vstr (char **str)
6487 {
6488 unsigned i;
6489 int val = FAIL;
6490 struct {
6491 const char *name;
6492 int regl;
6493 int regh;
6494 } sysregs[] = {
6495 {"FPSCR", 0x1, 0x0},
6496 {"FPSCR_nzcvqc", 0x2, 0x0},
6497 {"VPR", 0x4, 0x1},
6498 {"P0", 0x5, 0x1},
6499 {"FPCXTNS", 0x6, 0x1},
6500 {"FPCXTS", 0x7, 0x1}
6501 };
6502 char *op_end = strchr (*str, ',');
6503 size_t op_strlen = op_end - *str;
6504
6505 for (i = 0; i < sizeof (sysregs) / sizeof (sysregs[0]); i++)
6506 {
6507 if (!strncmp (*str, sysregs[i].name, op_strlen))
6508 {
6509 val = sysregs[i].regl | (sysregs[i].regh << 3);
6510 *str = op_end;
6511 break;
6512 }
6513 }
6514
6515 return val;
6516 }
6517
6518 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6519 value suitable for splatting into the AIF field of the instruction. */
6520
6521 static int
6522 parse_cps_flags (char **str)
6523 {
6524 int val = 0;
6525 int saw_a_flag = 0;
6526 char *s = *str;
6527
6528 for (;;)
6529 switch (*s++)
6530 {
6531 case '\0': case ',':
6532 goto done;
6533
6534 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6535 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6536 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6537
6538 default:
6539 inst.error = _("unrecognized CPS flag");
6540 return FAIL;
6541 }
6542
6543 done:
6544 if (saw_a_flag == 0)
6545 {
6546 inst.error = _("missing CPS flags");
6547 return FAIL;
6548 }
6549
6550 *str = s - 1;
6551 return val;
6552 }
6553
6554 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6555 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6556
6557 static int
6558 parse_endian_specifier (char **str)
6559 {
6560 int little_endian;
6561 char *s = *str;
6562
6563 if (strncasecmp (s, "BE", 2))
6564 little_endian = 0;
6565 else if (strncasecmp (s, "LE", 2))
6566 little_endian = 1;
6567 else
6568 {
6569 inst.error = _("valid endian specifiers are be or le");
6570 return FAIL;
6571 }
6572
6573 if (ISALNUM (s[2]) || s[2] == '_')
6574 {
6575 inst.error = _("valid endian specifiers are be or le");
6576 return FAIL;
6577 }
6578
6579 *str = s + 2;
6580 return little_endian;
6581 }
6582
6583 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6584 value suitable for poking into the rotate field of an sxt or sxta
6585 instruction, or FAIL on error. */
6586
6587 static int
6588 parse_ror (char **str)
6589 {
6590 int rot;
6591 char *s = *str;
6592
6593 if (strncasecmp (s, "ROR", 3) == 0)
6594 s += 3;
6595 else
6596 {
6597 inst.error = _("missing rotation field after comma");
6598 return FAIL;
6599 }
6600
6601 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6602 return FAIL;
6603
6604 switch (rot)
6605 {
6606 case 0: *str = s; return 0x0;
6607 case 8: *str = s; return 0x1;
6608 case 16: *str = s; return 0x2;
6609 case 24: *str = s; return 0x3;
6610
6611 default:
6612 inst.error = _("rotation can only be 0, 8, 16, or 24");
6613 return FAIL;
6614 }
6615 }
6616
6617 /* Parse a conditional code (from conds[] below). The value returned is in the
6618 range 0 .. 14, or FAIL. */
6619 static int
6620 parse_cond (char **str)
6621 {
6622 char *q;
6623 const struct asm_cond *c;
6624 int n;
6625 /* Condition codes are always 2 characters, so matching up to
6626 3 characters is sufficient. */
6627 char cond[3];
6628
6629 q = *str;
6630 n = 0;
6631 while (ISALPHA (*q) && n < 3)
6632 {
6633 cond[n] = TOLOWER (*q);
6634 q++;
6635 n++;
6636 }
6637
6638 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6639 if (!c)
6640 {
6641 inst.error = _("condition required");
6642 return FAIL;
6643 }
6644
6645 *str = q;
6646 return c->value;
6647 }
6648
6649 /* Parse an option for a barrier instruction. Returns the encoding for the
6650 option, or FAIL. */
6651 static int
6652 parse_barrier (char **str)
6653 {
6654 char *p, *q;
6655 const struct asm_barrier_opt *o;
6656
6657 p = q = *str;
6658 while (ISALPHA (*q))
6659 q++;
6660
6661 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6662 q - p);
6663 if (!o)
6664 return FAIL;
6665
6666 if (!mark_feature_used (&o->arch))
6667 return FAIL;
6668
6669 *str = q;
6670 return o->value;
6671 }
6672
6673 /* Parse the operands of a table branch instruction. Similar to a memory
6674 operand. */
6675 static int
6676 parse_tb (char **str)
6677 {
6678 char * p = *str;
6679 int reg;
6680
6681 if (skip_past_char (&p, '[') == FAIL)
6682 {
6683 inst.error = _("'[' expected");
6684 return FAIL;
6685 }
6686
6687 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6688 {
6689 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6690 return FAIL;
6691 }
6692 inst.operands[0].reg = reg;
6693
6694 if (skip_past_comma (&p) == FAIL)
6695 {
6696 inst.error = _("',' expected");
6697 return FAIL;
6698 }
6699
6700 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6701 {
6702 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6703 return FAIL;
6704 }
6705 inst.operands[0].imm = reg;
6706
6707 if (skip_past_comma (&p) == SUCCESS)
6708 {
6709 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6710 return FAIL;
6711 if (inst.relocs[0].exp.X_add_number != 1)
6712 {
6713 inst.error = _("invalid shift");
6714 return FAIL;
6715 }
6716 inst.operands[0].shifted = 1;
6717 }
6718
6719 if (skip_past_char (&p, ']') == FAIL)
6720 {
6721 inst.error = _("']' expected");
6722 return FAIL;
6723 }
6724 *str = p;
6725 return SUCCESS;
6726 }
6727
6728 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6729 information on the types the operands can take and how they are encoded.
6730 Up to four operands may be read; this function handles setting the
6731 ".present" field for each read operand itself.
6732 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6733 else returns FAIL. */
6734
6735 static int
6736 parse_neon_mov (char **str, int *which_operand)
6737 {
6738 int i = *which_operand, val;
6739 enum arm_reg_type rtype;
6740 char *ptr = *str;
6741 struct neon_type_el optype;
6742
6743 if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
6744 {
6745 /* Cases 17 or 19. */
6746 inst.operands[i].reg = val;
6747 inst.operands[i].isvec = 1;
6748 inst.operands[i].isscalar = 2;
6749 inst.operands[i].vectype = optype;
6750 inst.operands[i++].present = 1;
6751
6752 if (skip_past_comma (&ptr) == FAIL)
6753 goto wanted_comma;
6754
6755 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6756 {
6757 /* Case 17: VMOV<c>.<dt> <Qd[idx]>, <Rt> */
6758 inst.operands[i].reg = val;
6759 inst.operands[i].isreg = 1;
6760 inst.operands[i].present = 1;
6761 }
6762 else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
6763 {
6764 /* Case 19: VMOV<c> <Qd[idx]>, <Qd[idx2]>, <Rt>, <Rt2> */
6765 inst.operands[i].reg = val;
6766 inst.operands[i].isvec = 1;
6767 inst.operands[i].isscalar = 2;
6768 inst.operands[i].vectype = optype;
6769 inst.operands[i++].present = 1;
6770
6771 if (skip_past_comma (&ptr) == FAIL)
6772 goto wanted_comma;
6773
6774 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6775 goto wanted_arm;
6776
6777 inst.operands[i].reg = val;
6778 inst.operands[i].isreg = 1;
6779 inst.operands[i++].present = 1;
6780
6781 if (skip_past_comma (&ptr) == FAIL)
6782 goto wanted_comma;
6783
6784 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6785 goto wanted_arm;
6786
6787 inst.operands[i].reg = val;
6788 inst.operands[i].isreg = 1;
6789 inst.operands[i].present = 1;
6790 }
6791 else
6792 {
6793 first_error (_("expected ARM or MVE vector register"));
6794 return FAIL;
6795 }
6796 }
6797 else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_VFD)) != FAIL)
6798 {
6799 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6800 inst.operands[i].reg = val;
6801 inst.operands[i].isscalar = 1;
6802 inst.operands[i].vectype = optype;
6803 inst.operands[i++].present = 1;
6804
6805 if (skip_past_comma (&ptr) == FAIL)
6806 goto wanted_comma;
6807
6808 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6809 goto wanted_arm;
6810
6811 inst.operands[i].reg = val;
6812 inst.operands[i].isreg = 1;
6813 inst.operands[i].present = 1;
6814 }
6815 else if (((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
6816 != FAIL)
6817 || ((val = arm_typed_reg_parse (&ptr, REG_TYPE_MQ, &rtype, &optype))
6818 != FAIL))
6819 {
6820 /* Cases 0, 1, 2, 3, 5 (D only). */
6821 if (skip_past_comma (&ptr) == FAIL)
6822 goto wanted_comma;
6823
6824 inst.operands[i].reg = val;
6825 inst.operands[i].isreg = 1;
6826 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
6827 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6828 inst.operands[i].isvec = 1;
6829 inst.operands[i].vectype = optype;
6830 inst.operands[i++].present = 1;
6831
6832 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6833 {
6834 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6835 Case 13: VMOV <Sd>, <Rm> */
6836 inst.operands[i].reg = val;
6837 inst.operands[i].isreg = 1;
6838 inst.operands[i].present = 1;
6839
6840 if (rtype == REG_TYPE_NQ)
6841 {
6842 first_error (_("can't use Neon quad register here"));
6843 return FAIL;
6844 }
6845 else if (rtype != REG_TYPE_VFS)
6846 {
6847 i++;
6848 if (skip_past_comma (&ptr) == FAIL)
6849 goto wanted_comma;
6850 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6851 goto wanted_arm;
6852 inst.operands[i].reg = val;
6853 inst.operands[i].isreg = 1;
6854 inst.operands[i].present = 1;
6855 }
6856 }
6857 else if (((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
6858 &optype)) != FAIL)
6859 || ((val = arm_typed_reg_parse (&ptr, REG_TYPE_MQ, &rtype,
6860 &optype)) != FAIL))
6861 {
6862 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6863 Case 1: VMOV<c><q> <Dd>, <Dm>
6864 Case 8: VMOV.F32 <Sd>, <Sm>
6865 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6866
6867 inst.operands[i].reg = val;
6868 inst.operands[i].isreg = 1;
6869 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
6870 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6871 inst.operands[i].isvec = 1;
6872 inst.operands[i].vectype = optype;
6873 inst.operands[i].present = 1;
6874
6875 if (skip_past_comma (&ptr) == SUCCESS)
6876 {
6877 /* Case 15. */
6878 i++;
6879
6880 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6881 goto wanted_arm;
6882
6883 inst.operands[i].reg = val;
6884 inst.operands[i].isreg = 1;
6885 inst.operands[i++].present = 1;
6886
6887 if (skip_past_comma (&ptr) == FAIL)
6888 goto wanted_comma;
6889
6890 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6891 goto wanted_arm;
6892
6893 inst.operands[i].reg = val;
6894 inst.operands[i].isreg = 1;
6895 inst.operands[i].present = 1;
6896 }
6897 }
6898 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
6899 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6900 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6901 Case 10: VMOV.F32 <Sd>, #<imm>
6902 Case 11: VMOV.F64 <Dd>, #<imm> */
6903 inst.operands[i].immisfloat = 1;
6904 else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
6905 == SUCCESS)
6906 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6907 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6908 ;
6909 else
6910 {
6911 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6912 return FAIL;
6913 }
6914 }
6915 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6916 {
6917 /* Cases 6, 7, 16, 18. */
6918 inst.operands[i].reg = val;
6919 inst.operands[i].isreg = 1;
6920 inst.operands[i++].present = 1;
6921
6922 if (skip_past_comma (&ptr) == FAIL)
6923 goto wanted_comma;
6924
6925 if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
6926 {
6927 /* Case 18: VMOV<c>.<dt> <Rt>, <Qn[idx]> */
6928 inst.operands[i].reg = val;
6929 inst.operands[i].isscalar = 2;
6930 inst.operands[i].present = 1;
6931 inst.operands[i].vectype = optype;
6932 }
6933 else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_VFD)) != FAIL)
6934 {
6935 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6936 inst.operands[i].reg = val;
6937 inst.operands[i].isscalar = 1;
6938 inst.operands[i].present = 1;
6939 inst.operands[i].vectype = optype;
6940 }
6941 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6942 {
6943 inst.operands[i].reg = val;
6944 inst.operands[i].isreg = 1;
6945 inst.operands[i++].present = 1;
6946
6947 if (skip_past_comma (&ptr) == FAIL)
6948 goto wanted_comma;
6949
6950 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
6951 != FAIL)
6952 {
6953 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6954
6955 inst.operands[i].reg = val;
6956 inst.operands[i].isreg = 1;
6957 inst.operands[i].isvec = 1;
6958 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6959 inst.operands[i].vectype = optype;
6960 inst.operands[i].present = 1;
6961
6962 if (rtype == REG_TYPE_VFS)
6963 {
6964 /* Case 14. */
6965 i++;
6966 if (skip_past_comma (&ptr) == FAIL)
6967 goto wanted_comma;
6968 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
6969 &optype)) == FAIL)
6970 {
6971 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
6972 return FAIL;
6973 }
6974 inst.operands[i].reg = val;
6975 inst.operands[i].isreg = 1;
6976 inst.operands[i].isvec = 1;
6977 inst.operands[i].issingle = 1;
6978 inst.operands[i].vectype = optype;
6979 inst.operands[i].present = 1;
6980 }
6981 }
6982 else
6983 {
6984 if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ))
6985 != FAIL)
6986 {
6987 /* Case 16: VMOV<c> <Rt>, <Rt2>, <Qd[idx]>, <Qd[idx2]> */
6988 inst.operands[i].reg = val;
6989 inst.operands[i].isvec = 1;
6990 inst.operands[i].isscalar = 2;
6991 inst.operands[i].vectype = optype;
6992 inst.operands[i++].present = 1;
6993
6994 if (skip_past_comma (&ptr) == FAIL)
6995 goto wanted_comma;
6996
6997 if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ))
6998 == FAIL)
6999 {
7000 first_error (_(reg_expected_msgs[REG_TYPE_MQ]));
7001 return FAIL;
7002 }
7003 inst.operands[i].reg = val;
7004 inst.operands[i].isvec = 1;
7005 inst.operands[i].isscalar = 2;
7006 inst.operands[i].vectype = optype;
7007 inst.operands[i].present = 1;
7008 }
7009 else
7010 {
7011 first_error (_("VFP single, double or MVE vector register"
7012 " expected"));
7013 return FAIL;
7014 }
7015 }
7016 }
7017 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
7018 != FAIL)
7019 {
7020 /* Case 13. */
7021 inst.operands[i].reg = val;
7022 inst.operands[i].isreg = 1;
7023 inst.operands[i].isvec = 1;
7024 inst.operands[i].issingle = 1;
7025 inst.operands[i].vectype = optype;
7026 inst.operands[i].present = 1;
7027 }
7028 }
7029 else
7030 {
7031 first_error (_("parse error"));
7032 return FAIL;
7033 }
7034
7035 /* Successfully parsed the operands. Update args. */
7036 *which_operand = i;
7037 *str = ptr;
7038 return SUCCESS;
7039
7040 wanted_comma:
7041 first_error (_("expected comma"));
7042 return FAIL;
7043
7044 wanted_arm:
7045 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
7046 return FAIL;
7047 }
7048
/* Pack two operand-parse codes into one pattern element, for
   instructions whose operand constraints differ between the ARM and
   Thumb encodings (e.g. ldrd).  The ARM code occupies the low 16 bits
   and the Thumb code the high 16 bits; parse_operands unpacks the
   half appropriate to the encoding being assembled.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
  (((thumb_operand) << 16) | (arm_operand))
7053
/* Matcher codes for parse_operands.  Each instruction's operand
   pattern is an array of these codes; parse_operands walks the array
   and parses one operand per code.  Codes at or after
   OP_FIRST_OPTIONAL denote operands that may be omitted.  A pattern
   element may also pack a distinct ARM and Thumb code together via
   MIX_ARM_THUMB_OPERANDS (see above).  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNDMQ,	/* Neon double precision (0..31) or MVE vector register.  */
  OP_RNDMQR,	/* Neon double precision (0..31), MVE vector or ARM register.
		 */
  OP_RNQ,	/* Neon quad precision register */
  OP_RNQMQ,	/* Neon quad or MVE vector register.  */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RVSD_COND,	/* VFP single, double precision register or condition code.  */
  OP_RVSDMQ,	/* VFP single, double precision or MVE vector register.  */
  OP_RNSD,	/* Neon single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNDQMQ,	/* Neon double, quad or MVE vector register.  */
  OP_RNDQMQR,	/* Neon double, quad, MVE vector or ARM register.  */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_RNSDMQ,	/* Neon single, double or MVE vector register */
  OP_RNSDQMQ,	/* Neon single, double or quad register or MVE vector register
		 */
  OP_RNSDQMQR,	/* Neon single, double or quad register, MVE vector register or
		   GPR (no SP/PC) */
  OP_RMQ,	/* MVE vector register.  */
  OP_RMQRZ,	/* MVE vector or ARM register including ZR.  */
  OP_RMQRR,	/* MVE vector or ARM register.  */

  /* New operands for Armv8.1-M Mainline.  */
  OP_LR,	/* ARM LR register */
  OP_RRe,	/* ARM register, only even numbered.  */
  OP_RRo,	/* ARM register, only odd numbered, not r13 or r15.  */
  OP_RRnpcsp_I32, /* ARM register (no BadReg) or literal 1 .. 32 */
  OP_RR_ZR,	/* ARM register or ZR but no PC */

  OP_REGLST,	/* ARM register list */
  OP_CLRMLST,	/* CLRM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */
  OP_VRSDVLST,	/* VFP single or double-precision register list and VPR */
  OP_MSTRLST2,	/* MVE vector list with two elements.  */
  OP_MSTRLST4,	/* MVE vector list with four elements.  */

  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0,	/* VFP S or D reg, or floating point immediate zero.  */
  OP_RSVDMQ_FI0, /* VFP S, D, MVE vector register or floating point immediate
		    zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSD_RNSC,	/* Neon S or D reg, or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNSDQ_RNSC_MQ, /* Vector S, D or Q reg, Neon scalar or MVE vector register.
		     */
  OP_RNSDQ_RNSC_MQ_RR, /* Vector S, D or Q reg, or MVE vector reg , or Neon
			  scalar, or ARM register.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC_RR, /* Neon D or Q reg, Neon scalar, or ARM register.  */
  OP_RNDQMQ_RNSC_RR, /* Neon D or Q reg, Neon scalar, MVE vector or ARM
			register.  */
  OP_RNDQMQ_RNSC, /* Neon D, Q or MVE vector reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  /* Neon D, Q or MVE vector register, or big immediate for logic and VMVN.  */
  OP_RNDQMQ_Ibig,
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RNDQMQ_I63b_RR, /* Neon D or Q reg, immediate for shift, MVE vector or
			ARM register.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
  OP_VLDR,	/* VLDR operand.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I48_I64,	/*		   48 or 64 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I127,	/*		   0 .. 127 */
  OP_I255,	/*		   0 .. 255 */
  OP_I511,	/*		   0 .. 511 */
  OP_I4095,	/*		   0 .. 4095 */
  OP_I8191,	/*		   0 .. 8191 */
  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRMVE,	/* Memory address expression for MVE's VSTR/VLDR.  */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS,	/* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_EXPs,	/* same, with optional non-first operand relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff.  */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC,	/* iWMMXt R or C reg */
  OP_RIWC_RIWG,	/* iWMMXt wC or wCG reg */

  /* Optional operands.  Codes from here on may be omitted in the source;
     parse_operands records a backtrack point before trying them.  */
  OP_oI7b,	/* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	/*			       0 .. 31 */
  OP_oI32b,	/*			       1 .. 32 */
  OP_oI32z,	/*			       0 .. 32 */
  OP_oIffffb,	/*			       0 .. 65535 */
  OP_oI255c,	/*	 curly-brace enclosed, 0 .. 255 */

  OP_oRR,	/* ARM register */
  OP_oLR,	/* ARM LR register */
  OP_oRRnpc,	/* ARM register, not the PC */
  OP_oRRnpcsp,	/* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	/* ARM register, not r15, optional trailing ! */
  OP_oRND,	/* Optional Neon double precision register */
  OP_oRNQ,	/* Optional Neon quad precision register */
  OP_oRNDQMQ,	/* Optional Neon double, quad or MVE vector register.  */
  OP_oRNDQ,	/* Optional Neon double or quad precision register */
  OP_oRNSDQ,	/* Optional single, double or quad precision vector register */
  OP_oRNSDQMQ,	/* Optional single, double or quad register or MVE vector
		   register.  */
  OP_oSHll,	/* LSL immediate */
  OP_oSHar,	/* ASR immediate */
  OP_oSHllar,	/* LSL or ASR immediate */
  OP_oROR,	/* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  OP_oRMQRZ,	/* optional MVE vector or ARM register including ZR.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* First optional code; parse_operands compares against this, so the
     ordering of the enumerators above is significant.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
7243
7244 /* Generic instruction operand parser. This does no encoding and no
7245 semantic validation; it merely squirrels values away in the inst
7246 structure. Returns SUCCESS or FAIL depending on whether the
7247 specified grammar matched. */
7248 static int
7249 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
7250 {
7251 unsigned const int *upat = pattern;
7252 char *backtrack_pos = 0;
7253 const char *backtrack_error = 0;
7254 int i, val = 0, backtrack_index = 0;
7255 enum arm_reg_type rtype;
7256 parse_operand_result result;
7257 unsigned int op_parse_code;
7258 bfd_boolean partial_match;
7259
7260 #define po_char_or_fail(chr) \
7261 do \
7262 { \
7263 if (skip_past_char (&str, chr) == FAIL) \
7264 goto bad_args; \
7265 } \
7266 while (0)
7267
7268 #define po_reg_or_fail(regtype) \
7269 do \
7270 { \
7271 val = arm_typed_reg_parse (& str, regtype, & rtype, \
7272 & inst.operands[i].vectype); \
7273 if (val == FAIL) \
7274 { \
7275 first_error (_(reg_expected_msgs[regtype])); \
7276 goto failure; \
7277 } \
7278 inst.operands[i].reg = val; \
7279 inst.operands[i].isreg = 1; \
7280 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
7281 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
7282 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
7283 || rtype == REG_TYPE_VFD \
7284 || rtype == REG_TYPE_NQ); \
7285 inst.operands[i].iszr = (rtype == REG_TYPE_ZR); \
7286 } \
7287 while (0)
7288
7289 #define po_reg_or_goto(regtype, label) \
7290 do \
7291 { \
7292 val = arm_typed_reg_parse (& str, regtype, & rtype, \
7293 & inst.operands[i].vectype); \
7294 if (val == FAIL) \
7295 goto label; \
7296 \
7297 inst.operands[i].reg = val; \
7298 inst.operands[i].isreg = 1; \
7299 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
7300 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
7301 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
7302 || rtype == REG_TYPE_VFD \
7303 || rtype == REG_TYPE_NQ); \
7304 inst.operands[i].iszr = (rtype == REG_TYPE_ZR); \
7305 } \
7306 while (0)
7307
7308 #define po_imm_or_fail(min, max, popt) \
7309 do \
7310 { \
7311 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
7312 goto failure; \
7313 inst.operands[i].imm = val; \
7314 } \
7315 while (0)
7316
7317 #define po_imm1_or_imm2_or_fail(imm1, imm2, popt) \
7318 do \
7319 { \
7320 expressionS exp; \
7321 my_get_expression (&exp, &str, popt); \
7322 if (exp.X_op != O_constant) \
7323 { \
7324 inst.error = _("constant expression required"); \
7325 goto failure; \
7326 } \
7327 if (exp.X_add_number != imm1 && exp.X_add_number != imm2) \
7328 { \
7329 inst.error = _("immediate value 48 or 64 expected"); \
7330 goto failure; \
7331 } \
7332 inst.operands[i].imm = exp.X_add_number; \
7333 } \
7334 while (0)
7335
7336 #define po_scalar_or_goto(elsz, label, reg_type) \
7337 do \
7338 { \
7339 val = parse_scalar (& str, elsz, & inst.operands[i].vectype, \
7340 reg_type); \
7341 if (val == FAIL) \
7342 goto label; \
7343 inst.operands[i].reg = val; \
7344 inst.operands[i].isscalar = 1; \
7345 } \
7346 while (0)
7347
7348 #define po_misc_or_fail(expr) \
7349 do \
7350 { \
7351 if (expr) \
7352 goto failure; \
7353 } \
7354 while (0)
7355
7356 #define po_misc_or_fail_no_backtrack(expr) \
7357 do \
7358 { \
7359 result = expr; \
7360 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
7361 backtrack_pos = 0; \
7362 if (result != PARSE_OPERAND_SUCCESS) \
7363 goto failure; \
7364 } \
7365 while (0)
7366
7367 #define po_barrier_or_imm(str) \
7368 do \
7369 { \
7370 val = parse_barrier (&str); \
7371 if (val == FAIL && ! ISALPHA (*str)) \
7372 goto immediate; \
7373 if (val == FAIL \
7374 /* ISB can only take SY as an option. */ \
7375 || ((inst.instruction & 0xf0) == 0x60 \
7376 && val != 0xf)) \
7377 { \
7378 inst.error = _("invalid barrier type"); \
7379 backtrack_pos = 0; \
7380 goto failure; \
7381 } \
7382 } \
7383 while (0)
7384
7385 skip_whitespace (str);
7386
7387 for (i = 0; upat[i] != OP_stop; i++)
7388 {
7389 op_parse_code = upat[i];
7390 if (op_parse_code >= 1<<16)
7391 op_parse_code = thumb ? (op_parse_code >> 16)
7392 : (op_parse_code & ((1<<16)-1));
7393
7394 if (op_parse_code >= OP_FIRST_OPTIONAL)
7395 {
7396 /* Remember where we are in case we need to backtrack. */
7397 backtrack_pos = str;
7398 backtrack_error = inst.error;
7399 backtrack_index = i;
7400 }
7401
7402 if (i > 0 && (i > 1 || inst.operands[0].present))
7403 po_char_or_fail (',');
7404
7405 switch (op_parse_code)
7406 {
7407 /* Registers */
7408 case OP_oRRnpc:
7409 case OP_oRRnpcsp:
7410 case OP_RRnpc:
7411 case OP_RRnpcsp:
7412 case OP_oRR:
7413 case OP_RRe:
7414 case OP_RRo:
7415 case OP_LR:
7416 case OP_oLR:
7417 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
7418 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
7419 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
7420 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
7421 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
7422 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
7423 case OP_oRND:
7424 case OP_RNDMQR:
7425 po_reg_or_goto (REG_TYPE_RN, try_rndmq);
7426 break;
7427 try_rndmq:
7428 case OP_RNDMQ:
7429 po_reg_or_goto (REG_TYPE_MQ, try_rnd);
7430 break;
7431 try_rnd:
7432 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
7433 case OP_RVC:
7434 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
7435 break;
7436 /* Also accept generic coprocessor regs for unknown registers. */
7437 coproc_reg:
7438 po_reg_or_goto (REG_TYPE_CN, vpr_po);
7439 break;
7440 /* Also accept P0 or p0 for VPR.P0. Since P0 is already an
7441 existing register with a value of 0, this seems like the
7442 best way to parse P0. */
7443 vpr_po:
7444 if (strncasecmp (str, "P0", 2) == 0)
7445 {
7446 str += 2;
7447 inst.operands[i].isreg = 1;
7448 inst.operands[i].reg = 13;
7449 }
7450 else
7451 goto failure;
7452 break;
7453 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
7454 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
7455 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
7456 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
7457 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
7458 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
7459 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
7460 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
7461 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
7462 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
7463 case OP_oRNQ:
7464 case OP_RNQMQ:
7465 po_reg_or_goto (REG_TYPE_MQ, try_nq);
7466 break;
7467 try_nq:
7468 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
7469 case OP_RNSD: po_reg_or_fail (REG_TYPE_NSD); break;
7470 case OP_RNDQMQR:
7471 po_reg_or_goto (REG_TYPE_RN, try_rndqmq);
7472 break;
7473 try_rndqmq:
7474 case OP_oRNDQMQ:
7475 case OP_RNDQMQ:
7476 po_reg_or_goto (REG_TYPE_MQ, try_rndq);
7477 break;
7478 try_rndq:
7479 case OP_oRNDQ:
7480 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
7481 case OP_RVSDMQ:
7482 po_reg_or_goto (REG_TYPE_MQ, try_rvsd);
7483 break;
7484 try_rvsd:
7485 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
7486 case OP_RVSD_COND:
7487 po_reg_or_goto (REG_TYPE_VFSD, try_cond);
7488 break;
7489 case OP_RNSDMQ:
7490 po_reg_or_goto (REG_TYPE_NSD, try_mq2);
7491 break;
7492 try_mq2:
7493 po_reg_or_fail (REG_TYPE_MQ);
7494 break;
7495 case OP_oRNSDQ:
7496 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
7497 case OP_RNSDQMQR:
7498 po_reg_or_goto (REG_TYPE_RN, try_mq);
7499 break;
7500 try_mq:
7501 case OP_oRNSDQMQ:
7502 case OP_RNSDQMQ:
7503 po_reg_or_goto (REG_TYPE_MQ, try_nsdq2);
7504 break;
7505 try_nsdq2:
7506 po_reg_or_fail (REG_TYPE_NSDQ);
7507 inst.error = 0;
7508 break;
7509 case OP_RMQRR:
7510 po_reg_or_goto (REG_TYPE_RN, try_rmq);
7511 break;
7512 try_rmq:
7513 case OP_RMQ:
7514 po_reg_or_fail (REG_TYPE_MQ);
7515 break;
7516 /* Neon scalar. Using an element size of 8 means that some invalid
7517 scalars are accepted here, so deal with those in later code. */
7518 case OP_RNSC: po_scalar_or_goto (8, failure, REG_TYPE_VFD); break;
7519
7520 case OP_RNDQ_I0:
7521 {
7522 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
7523 break;
7524 try_imm0:
7525 po_imm_or_fail (0, 0, TRUE);
7526 }
7527 break;
7528
7529 case OP_RVSD_I0:
7530 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
7531 break;
7532
7533 case OP_RSVDMQ_FI0:
7534 po_reg_or_goto (REG_TYPE_MQ, try_rsvd_fi0);
7535 break;
7536 try_rsvd_fi0:
7537 case OP_RSVD_FI0:
7538 {
7539 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
7540 break;
7541 try_ifimm0:
7542 if (parse_ifimm_zero (&str))
7543 inst.operands[i].imm = 0;
7544 else
7545 {
7546 inst.error
7547 = _("only floating point zero is allowed as immediate value");
7548 goto failure;
7549 }
7550 }
7551 break;
7552
7553 case OP_RR_RNSC:
7554 {
7555 po_scalar_or_goto (8, try_rr, REG_TYPE_VFD);
7556 break;
7557 try_rr:
7558 po_reg_or_fail (REG_TYPE_RN);
7559 }
7560 break;
7561
7562 case OP_RNSDQ_RNSC_MQ_RR:
7563 po_reg_or_goto (REG_TYPE_RN, try_rnsdq_rnsc_mq);
7564 break;
7565 try_rnsdq_rnsc_mq:
7566 case OP_RNSDQ_RNSC_MQ:
7567 po_reg_or_goto (REG_TYPE_MQ, try_rnsdq_rnsc);
7568 break;
7569 try_rnsdq_rnsc:
7570 case OP_RNSDQ_RNSC:
7571 {
7572 po_scalar_or_goto (8, try_nsdq, REG_TYPE_VFD);
7573 inst.error = 0;
7574 break;
7575 try_nsdq:
7576 po_reg_or_fail (REG_TYPE_NSDQ);
7577 inst.error = 0;
7578 }
7579 break;
7580
7581 case OP_RNSD_RNSC:
7582 {
7583 po_scalar_or_goto (8, try_s_scalar, REG_TYPE_VFD);
7584 break;
7585 try_s_scalar:
7586 po_scalar_or_goto (4, try_nsd, REG_TYPE_VFS);
7587 break;
7588 try_nsd:
7589 po_reg_or_fail (REG_TYPE_NSD);
7590 }
7591 break;
7592
7593 case OP_RNDQMQ_RNSC_RR:
7594 po_reg_or_goto (REG_TYPE_MQ, try_rndq_rnsc_rr);
7595 break;
7596 try_rndq_rnsc_rr:
7597 case OP_RNDQ_RNSC_RR:
7598 po_reg_or_goto (REG_TYPE_RN, try_rndq_rnsc);
7599 break;
7600 case OP_RNDQMQ_RNSC:
7601 po_reg_or_goto (REG_TYPE_MQ, try_rndq_rnsc);
7602 break;
7603 try_rndq_rnsc:
7604 case OP_RNDQ_RNSC:
7605 {
7606 po_scalar_or_goto (8, try_ndq, REG_TYPE_VFD);
7607 break;
7608 try_ndq:
7609 po_reg_or_fail (REG_TYPE_NDQ);
7610 }
7611 break;
7612
7613 case OP_RND_RNSC:
7614 {
7615 po_scalar_or_goto (8, try_vfd, REG_TYPE_VFD);
7616 break;
7617 try_vfd:
7618 po_reg_or_fail (REG_TYPE_VFD);
7619 }
7620 break;
7621
7622 case OP_VMOV:
7623 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
7624 not careful then bad things might happen. */
7625 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
7626 break;
7627
7628 case OP_RNDQMQ_Ibig:
7629 po_reg_or_goto (REG_TYPE_MQ, try_rndq_ibig);
7630 break;
7631 try_rndq_ibig:
7632 case OP_RNDQ_Ibig:
7633 {
7634 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
7635 break;
7636 try_immbig:
7637 /* There's a possibility of getting a 64-bit immediate here, so
7638 we need special handling. */
7639 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
7640 == FAIL)
7641 {
7642 inst.error = _("immediate value is out of range");
7643 goto failure;
7644 }
7645 }
7646 break;
7647
7648 case OP_RNDQMQ_I63b_RR:
7649 po_reg_or_goto (REG_TYPE_MQ, try_rndq_i63b_rr);
7650 break;
7651 try_rndq_i63b_rr:
7652 po_reg_or_goto (REG_TYPE_RN, try_rndq_i63b);
7653 break;
7654 try_rndq_i63b:
7655 case OP_RNDQ_I63b:
7656 {
7657 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
7658 break;
7659 try_shimm:
7660 po_imm_or_fail (0, 63, TRUE);
7661 }
7662 break;
7663
7664 case OP_RRnpcb:
7665 po_char_or_fail ('[');
7666 po_reg_or_fail (REG_TYPE_RN);
7667 po_char_or_fail (']');
7668 break;
7669
7670 case OP_RRnpctw:
7671 case OP_RRw:
7672 case OP_oRRw:
7673 po_reg_or_fail (REG_TYPE_RN);
7674 if (skip_past_char (&str, '!') == SUCCESS)
7675 inst.operands[i].writeback = 1;
7676 break;
7677
7678 /* Immediates */
7679 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
7680 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
7681 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
7682 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
7683 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
7684 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
7685 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
7686 case OP_I48_I64: po_imm1_or_imm2_or_fail (48, 64, FALSE); break;
7687 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
7688 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
7689 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
7690 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
7691 case OP_I127: po_imm_or_fail ( 0, 127, FALSE); break;
7692 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
7693 case OP_I511: po_imm_or_fail ( 0, 511, FALSE); break;
7694 case OP_I4095: po_imm_or_fail ( 0, 4095, FALSE); break;
7695 case OP_I8191: po_imm_or_fail ( 0, 8191, FALSE); break;
7696 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
7697 case OP_oI7b:
7698 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
7699 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
7700 case OP_oI31b:
7701 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
7702 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
7703 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
7704 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
7705
7706 /* Immediate variants */
7707 case OP_oI255c:
7708 po_char_or_fail ('{');
7709 po_imm_or_fail (0, 255, TRUE);
7710 po_char_or_fail ('}');
7711 break;
7712
7713 case OP_I31w:
7714 /* The expression parser chokes on a trailing !, so we have
7715 to find it first and zap it. */
7716 {
7717 char *s = str;
7718 while (*s && *s != ',')
7719 s++;
7720 if (s[-1] == '!')
7721 {
7722 s[-1] = '\0';
7723 inst.operands[i].writeback = 1;
7724 }
7725 po_imm_or_fail (0, 31, TRUE);
7726 if (str == s - 1)
7727 str = s;
7728 }
7729 break;
7730
7731 /* Expressions */
7732 case OP_EXPi: EXPi:
7733 po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7734 GE_OPT_PREFIX));
7735 break;
7736
7737 case OP_EXP:
7738 po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7739 GE_NO_PREFIX));
7740 break;
7741
7742 case OP_EXPr: EXPr:
7743 po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7744 GE_NO_PREFIX));
7745 if (inst.relocs[0].exp.X_op == O_symbol)
7746 {
7747 val = parse_reloc (&str);
7748 if (val == -1)
7749 {
7750 inst.error = _("unrecognized relocation suffix");
7751 goto failure;
7752 }
7753 else if (val != BFD_RELOC_UNUSED)
7754 {
7755 inst.operands[i].imm = val;
7756 inst.operands[i].hasreloc = 1;
7757 }
7758 }
7759 break;
7760
7761 case OP_EXPs:
7762 po_misc_or_fail (my_get_expression (&inst.relocs[i].exp, &str,
7763 GE_NO_PREFIX));
7764 if (inst.relocs[i].exp.X_op == O_symbol)
7765 {
7766 inst.operands[i].hasreloc = 1;
7767 }
7768 else if (inst.relocs[i].exp.X_op == O_constant)
7769 {
7770 inst.operands[i].imm = inst.relocs[i].exp.X_add_number;
7771 inst.operands[i].hasreloc = 0;
7772 }
7773 break;
7774
7775 /* Operand for MOVW or MOVT. */
7776 case OP_HALF:
7777 po_misc_or_fail (parse_half (&str));
7778 break;
7779
7780 /* Register or expression. */
7781 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
7782 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
7783
7784 /* Register or immediate. */
7785 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
7786 I0: po_imm_or_fail (0, 0, FALSE); break;
7787
7788 case OP_RRnpcsp_I32: po_reg_or_goto (REG_TYPE_RN, I32); break;
7789 I32: po_imm_or_fail (1, 32, FALSE); break;
7790
7791 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
7792 IF:
7793 if (!is_immediate_prefix (*str))
7794 goto bad_args;
7795 str++;
7796 val = parse_fpa_immediate (&str);
7797 if (val == FAIL)
7798 goto failure;
7799 /* FPA immediates are encoded as registers 8-15.
7800 parse_fpa_immediate has already applied the offset. */
7801 inst.operands[i].reg = val;
7802 inst.operands[i].isreg = 1;
7803 break;
7804
7805 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
7806 I32z: po_imm_or_fail (0, 32, FALSE); break;
7807
7808 /* Two kinds of register. */
7809 case OP_RIWR_RIWC:
7810 {
7811 struct reg_entry *rege = arm_reg_parse_multi (&str);
7812 if (!rege
7813 || (rege->type != REG_TYPE_MMXWR
7814 && rege->type != REG_TYPE_MMXWC
7815 && rege->type != REG_TYPE_MMXWCG))
7816 {
7817 inst.error = _("iWMMXt data or control register expected");
7818 goto failure;
7819 }
7820 inst.operands[i].reg = rege->number;
7821 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
7822 }
7823 break;
7824
7825 case OP_RIWC_RIWG:
7826 {
7827 struct reg_entry *rege = arm_reg_parse_multi (&str);
7828 if (!rege
7829 || (rege->type != REG_TYPE_MMXWC
7830 && rege->type != REG_TYPE_MMXWCG))
7831 {
7832 inst.error = _("iWMMXt control register expected");
7833 goto failure;
7834 }
7835 inst.operands[i].reg = rege->number;
7836 inst.operands[i].isreg = 1;
7837 }
7838 break;
7839
7840 /* Misc */
7841 case OP_CPSF: val = parse_cps_flags (&str); break;
7842 case OP_ENDI: val = parse_endian_specifier (&str); break;
7843 case OP_oROR: val = parse_ror (&str); break;
7844 try_cond:
7845 case OP_COND: val = parse_cond (&str); break;
7846 case OP_oBARRIER_I15:
7847 po_barrier_or_imm (str); break;
7848 immediate:
7849 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
7850 goto failure;
7851 break;
7852
7853 case OP_wPSR:
7854 case OP_rPSR:
7855 po_reg_or_goto (REG_TYPE_RNB, try_psr);
7856 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7857 {
7858 inst.error = _("Banked registers are not available with this "
7859 "architecture.");
7860 goto failure;
7861 }
7862 break;
7863 try_psr:
7864 val = parse_psr (&str, op_parse_code == OP_wPSR);
7865 break;
7866
7867 case OP_VLDR:
7868 po_reg_or_goto (REG_TYPE_VFSD, try_sysreg);
7869 break;
7870 try_sysreg:
7871 val = parse_sys_vldr_vstr (&str);
7872 break;
7873
7874 case OP_APSR_RR:
7875 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7876 break;
7877 try_apsr:
7878 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7879 instruction). */
7880 if (strncasecmp (str, "APSR_", 5) == 0)
7881 {
7882 unsigned found = 0;
7883 str += 5;
7884 while (found < 15)
7885 switch (*str++)
7886 {
7887 case 'c': found = (found & 1) ? 16 : found | 1; break;
7888 case 'n': found = (found & 2) ? 16 : found | 2; break;
7889 case 'z': found = (found & 4) ? 16 : found | 4; break;
7890 case 'v': found = (found & 8) ? 16 : found | 8; break;
7891 default: found = 16;
7892 }
7893 if (found != 15)
7894 goto failure;
7895 inst.operands[i].isvec = 1;
7896 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7897 inst.operands[i].reg = REG_PC;
7898 }
7899 else
7900 goto failure;
7901 break;
7902
7903 case OP_TB:
7904 po_misc_or_fail (parse_tb (&str));
7905 break;
7906
7907 /* Register lists. */
7908 case OP_REGLST:
7909 val = parse_reg_list (&str, REGLIST_RN);
7910 if (*str == '^')
7911 {
7912 inst.operands[i].writeback = 1;
7913 str++;
7914 }
7915 break;
7916
7917 case OP_CLRMLST:
7918 val = parse_reg_list (&str, REGLIST_CLRM);
7919 break;
7920
7921 case OP_VRSLST:
7922 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S,
7923 &partial_match);
7924 break;
7925
7926 case OP_VRDLST:
7927 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D,
7928 &partial_match);
7929 break;
7930
7931 case OP_VRSDLST:
7932 /* Allow Q registers too. */
7933 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7934 REGLIST_NEON_D, &partial_match);
7935 if (val == FAIL)
7936 {
7937 inst.error = NULL;
7938 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7939 REGLIST_VFP_S, &partial_match);
7940 inst.operands[i].issingle = 1;
7941 }
7942 break;
7943
7944 case OP_VRSDVLST:
7945 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7946 REGLIST_VFP_D_VPR, &partial_match);
7947 if (val == FAIL && !partial_match)
7948 {
7949 inst.error = NULL;
7950 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7951 REGLIST_VFP_S_VPR, &partial_match);
7952 inst.operands[i].issingle = 1;
7953 }
7954 break;
7955
7956 case OP_NRDLST:
7957 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7958 REGLIST_NEON_D, &partial_match);
7959 break;
7960
7961 case OP_MSTRLST4:
7962 case OP_MSTRLST2:
7963 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7964 1, &inst.operands[i].vectype);
7965 if (val != (((op_parse_code == OP_MSTRLST2) ? 3 : 7) << 5 | 0xe))
7966 goto failure;
7967 break;
7968 case OP_NSTRLST:
7969 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7970 0, &inst.operands[i].vectype);
7971 break;
7972
7973 /* Addressing modes */
7974 case OP_ADDRMVE:
7975 po_misc_or_fail (parse_address_group_reloc (&str, i, GROUP_MVE));
7976 break;
7977
7978 case OP_ADDR:
7979 po_misc_or_fail (parse_address (&str, i));
7980 break;
7981
7982 case OP_ADDRGLDR:
7983 po_misc_or_fail_no_backtrack (
7984 parse_address_group_reloc (&str, i, GROUP_LDR));
7985 break;
7986
7987 case OP_ADDRGLDRS:
7988 po_misc_or_fail_no_backtrack (
7989 parse_address_group_reloc (&str, i, GROUP_LDRS));
7990 break;
7991
7992 case OP_ADDRGLDC:
7993 po_misc_or_fail_no_backtrack (
7994 parse_address_group_reloc (&str, i, GROUP_LDC));
7995 break;
7996
7997 case OP_SH:
7998 po_misc_or_fail (parse_shifter_operand (&str, i));
7999 break;
8000
8001 case OP_SHG:
8002 po_misc_or_fail_no_backtrack (
8003 parse_shifter_operand_group_reloc (&str, i));
8004 break;
8005
8006 case OP_oSHll:
8007 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
8008 break;
8009
8010 case OP_oSHar:
8011 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
8012 break;
8013
8014 case OP_oSHllar:
8015 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
8016 break;
8017
8018 case OP_RMQRZ:
8019 case OP_oRMQRZ:
8020 po_reg_or_goto (REG_TYPE_MQ, try_rr_zr);
8021 break;
8022
8023 case OP_RR_ZR:
8024 try_rr_zr:
8025 po_reg_or_goto (REG_TYPE_RN, ZR);
8026 break;
8027 ZR:
8028 po_reg_or_fail (REG_TYPE_ZR);
8029 break;
8030
8031 default:
8032 as_fatal (_("unhandled operand code %d"), op_parse_code);
8033 }
8034
8035 /* Various value-based sanity checks and shared operations. We
8036 do not signal immediate failures for the register constraints;
8037 this allows a syntax error to take precedence. */
8038 switch (op_parse_code)
8039 {
8040 case OP_oRRnpc:
8041 case OP_RRnpc:
8042 case OP_RRnpcb:
8043 case OP_RRw:
8044 case OP_oRRw:
8045 case OP_RRnpc_I0:
8046 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
8047 inst.error = BAD_PC;
8048 break;
8049
8050 case OP_oRRnpcsp:
8051 case OP_RRnpcsp:
8052 case OP_RRnpcsp_I32:
8053 if (inst.operands[i].isreg)
8054 {
8055 if (inst.operands[i].reg == REG_PC)
8056 inst.error = BAD_PC;
8057 else if (inst.operands[i].reg == REG_SP
8058 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
8059 relaxed since ARMv8-A. */
8060 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
8061 {
8062 gas_assert (thumb);
8063 inst.error = BAD_SP;
8064 }
8065 }
8066 break;
8067
8068 case OP_RRnpctw:
8069 if (inst.operands[i].isreg
8070 && inst.operands[i].reg == REG_PC
8071 && (inst.operands[i].writeback || thumb))
8072 inst.error = BAD_PC;
8073 break;
8074
8075 case OP_RVSD_COND:
8076 case OP_VLDR:
8077 if (inst.operands[i].isreg)
8078 break;
8079 /* fall through. */
8080
8081 case OP_CPSF:
8082 case OP_ENDI:
8083 case OP_oROR:
8084 case OP_wPSR:
8085 case OP_rPSR:
8086 case OP_COND:
8087 case OP_oBARRIER_I15:
8088 case OP_REGLST:
8089 case OP_CLRMLST:
8090 case OP_VRSLST:
8091 case OP_VRDLST:
8092 case OP_VRSDLST:
8093 case OP_VRSDVLST:
8094 case OP_NRDLST:
8095 case OP_NSTRLST:
8096 case OP_MSTRLST2:
8097 case OP_MSTRLST4:
8098 if (val == FAIL)
8099 goto failure;
8100 inst.operands[i].imm = val;
8101 break;
8102
8103 case OP_LR:
8104 case OP_oLR:
8105 if (inst.operands[i].reg != REG_LR)
8106 inst.error = _("operand must be LR register");
8107 break;
8108
8109 case OP_RMQRZ:
8110 case OP_oRMQRZ:
8111 case OP_RR_ZR:
8112 if (!inst.operands[i].iszr && inst.operands[i].reg == REG_PC)
8113 inst.error = BAD_PC;
8114 break;
8115
8116 case OP_RRe:
8117 if (inst.operands[i].isreg
8118 && (inst.operands[i].reg & 0x00000001) != 0)
8119 inst.error = BAD_ODD;
8120 break;
8121
8122 case OP_RRo:
8123 if (inst.operands[i].isreg)
8124 {
8125 if ((inst.operands[i].reg & 0x00000001) != 1)
8126 inst.error = BAD_EVEN;
8127 else if (inst.operands[i].reg == REG_SP)
8128 as_tsktsk (MVE_BAD_SP);
8129 else if (inst.operands[i].reg == REG_PC)
8130 inst.error = BAD_PC;
8131 }
8132 break;
8133
8134 default:
8135 break;
8136 }
8137
8138 /* If we get here, this operand was successfully parsed. */
8139 inst.operands[i].present = 1;
8140 continue;
8141
8142 bad_args:
8143 inst.error = BAD_ARGS;
8144
8145 failure:
8146 if (!backtrack_pos)
8147 {
8148 /* The parse routine should already have set inst.error, but set a
8149 default here just in case. */
8150 if (!inst.error)
8151 inst.error = BAD_SYNTAX;
8152 return FAIL;
8153 }
8154
8155 /* Do not backtrack over a trailing optional argument that
8156 absorbed some text. We will only fail again, with the
8157 'garbage following instruction' error message, which is
8158 probably less helpful than the current one. */
8159 if (backtrack_index == i && backtrack_pos != str
8160 && upat[i+1] == OP_stop)
8161 {
8162 if (!inst.error)
8163 inst.error = BAD_SYNTAX;
8164 return FAIL;
8165 }
8166
8167 /* Try again, skipping the optional argument at backtrack_pos. */
8168 str = backtrack_pos;
8169 inst.error = backtrack_error;
8170 inst.operands[backtrack_index].present = 0;
8171 i = backtrack_index;
8172 backtrack_pos = 0;
8173 }
8174
8175 /* Check that we have parsed all the arguments. */
8176 if (*str != '\0' && !inst.error)
8177 inst.error = _("garbage following instruction");
8178
8179 return inst.error ? FAIL : SUCCESS;
8180 }
8181
8182 #undef po_char_or_fail
8183 #undef po_reg_or_fail
8184 #undef po_reg_or_goto
8185 #undef po_imm_or_fail
8186 #undef po_scalar_or_fail
8187 #undef po_barrier_or_imm
8188
/* Shorthand macro for instruction encoding functions issuing errors.
   Evaluates EXPR; if it is true, records ERR in inst.error and returns
   from the *calling* function, so any encoding work after a failed
   constraint is skipped.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
8200
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.

   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
   places, while the restriction on REG_SP was relaxed since ARMv8-A.

   Like the constraint macro above, a rejected register sets inst.error
   and returns from the calling encoder.  */
#define reject_bad_reg(reg)					\
  do								\
   if (reg == REG_PC)						\
     {								\
       inst.error = BAD_PC;					\
       return;							\
     }								\
   else if (reg == REG_SP					\
	    && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))	\
     {								\
       inst.error = BAD_SP;					\
       return;							\
     }								\
  while (0)
8221
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  Only fires when deprecation warnings are enabled
   (warn_on_deprecated); as_tsktsk is a non-fatal diagnostic.  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
      as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
8229
8230 /* Functions for operand encoding. ARM, then Thumb. */
8231
/* Rotate a 32-bit value V left by N bits (N taken modulo 32).  All
   macro parameters and the whole expansion are parenthesized so the
   macro is safe in compound expressions; the "& 31" masks keep every
   shift count inside [0,31], avoiding undefined behaviour for N == 0
   or N == 32.  */
#define rotate_left(v, n) (((v) << ((n) & 31)) | ((v) >> ((32 - (n)) & 31)))
8233
/* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.

   The only binary encoding difference is the Coprocessor number.  Coprocessor
   9 is used for half-precision calculations or conversions.  The format of the
   instruction is the same as the equivalent Coprocessor 10 instruction that
   exists for Single-Precision operation.  */

static void
do_scalar_fp16_v82_encode (void)
{
  /* Warn (rather than error) for a condition code, since the resulting
     behaviour is architecturally UNPREDICTABLE.  */
  if (inst.cond < COND_ALWAYS)
    as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
	       " the behaviour is UNPREDICTABLE"));
  /* Hard error when the selected CPU lacks the fp16 extension.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
	      _(BAD_FP16));

  /* Rewrite the coprocessor field (bits [11:8]) to 9.  */
  inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
  mark_feature_used (&arm_ext_fp16);
}
8253
8254 /* If VAL can be encoded in the immediate field of an ARM instruction,
8255 return the encoded form. Otherwise, return FAIL. */
8256
8257 static unsigned int
8258 encode_arm_immediate (unsigned int val)
8259 {
8260 unsigned int a, i;
8261
8262 if (val <= 0xff)
8263 return val;
8264
8265 for (i = 2; i < 32; i += 2)
8266 if ((a = rotate_left (val, i)) <= 0xff)
8267 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
8268
8269 return FAIL;
8270 }
8271
8272 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
8273 return the encoded form. Otherwise, return FAIL. */
8274 static unsigned int
8275 encode_thumb32_immediate (unsigned int val)
8276 {
8277 unsigned int a, i;
8278
8279 if (val <= 0xff)
8280 return val;
8281
8282 for (i = 1; i <= 24; i++)
8283 {
8284 a = val >> i;
8285 if ((val & ~(0xff << i)) == 0)
8286 return ((val >> i) & 0x7f) | ((32 - i) << 7);
8287 }
8288
8289 a = val & 0xff;
8290 if (val == ((a << 16) | a))
8291 return 0x100 | a;
8292 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
8293 return 0x300 | a;
8294
8295 a = val & 0xff00;
8296 if (val == ((a << 16) | a))
8297 return 0x200 | (a >> 8);
8298
8299 return FAIL;
8300 }
/* Encode a VFP SP or DP register number into inst.instruction.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D16-D31 need the VFPv3 D32 extension; record its use, or fail for
     FPUs that only provide D0-D15.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* An SP register N is encoded as N>>1 in the 4-bit field plus N&1 in
     the associated single-bit field; a DP register N as N&15 plus
     N>>4.  The field positions depend on the operand slot (d/n/m).  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
8355
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix.  */
static void
encode_arm_shift (int i)
{
  /* register-shifted register.  */
  if (inst.operands[i].immisreg)
    {
      int op_index;
      /* Using r15 anywhere in a register-shifted register operand is
	 UNPREDICTABLE; warn for every operand that names it.  */
      for (op_index = 0; op_index <= i; ++op_index)
	{
	  /* Check the operand only when it's presented.  In pre-UAL syntax,
	     if the destination register is the same as the first operand, two
	     register form of the instruction can be used.  */
	  if (inst.operands[op_index].present && inst.operands[op_index].isreg
	      && inst.operands[op_index].reg == REG_PC)
	    as_warn (UNPRED_REG ("r15"));
	}

      /* Also check the register holding the shift amount.  */
      if (inst.operands[i].imm == REG_PC)
	as_warn (UNPRED_REG ("r15"));
    }

  /* RRX uses the ROR shift-kind encoding (with no shift amount set).  */
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 5;
  else
    {
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
	{
	  inst.instruction |= SHIFT_BY_REG;
	  inst.instruction |= inst.operands[i].imm << 8;
	}
      else
	/* Immediate shift amount: leave a fixup for md_apply_fix.  */
	inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
    }
}
8393
/* Encode operand I as the shifter operand of an ARM data-processing
   instruction: either a (possibly shifted) register or a rotated
   8-bit immediate.  */
static void
encode_arm_shifter_operand (int i)
{
  if (inst.operands[i].isreg)
    {
      inst.instruction |= inst.operands[i].reg;
      encode_arm_shift (i);
    }
  else
    {
      /* Immediate form: set the I bit.  The value is inserted directly
	 only when no immediate fixup is pending for md_apply_fix.  */
      inst.instruction |= INST_IMMEDIATE;
      if (inst.relocs[0].type != BFD_RELOC_ARM_IMMEDIATE)
	inst.instruction |= inst.operands[i].imm;
    }
}
8409
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register and the pre/post-index and write-back
   bits shared by both addressing modes.  IS_T selects the T-variant
   (ldrt/strt-style) restrictions.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register goes in bits [19:16].  */
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      /* T-variant instructions accept post-indexed addressing only.  */
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* For T-variant instructions the W bit is set in the
	 post-indexed form.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the base register (bits [19:16]) matches the transfer
     register (bits [15:12]) and write-back will take place.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
8452
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, optionally scaled by an immediate shift.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.relocs[0] */
    {
      if (is_pc && !inst.relocs[0].pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
8512
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no barrel shifter: scaled register offsets are not
     encodable.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.relocs[0] */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.relocs[0].pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
8556
/* Write immediate bits [7:0] to the following locations:

  |28/24|23     19|18 16|15                    4|3     0|
  |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|

  This function is used by VMOV/VMVN/VORR/VBIC.  */

static void
neon_write_immbits (unsigned immbits)
{
  /* Bits [3:0] -> e f g h; bits [6:4] -> b c d.  */
  inst.instruction |= immbits & 0xf;
  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
  /* Bit [7] -> a; it lives at bit 28 in the Thumb encoding but bit 24
     in the ARM encoding.  */
  inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
}
8571
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is treated as zero and not written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo_bits = xlo != NULL ? *xlo : 0;
  unsigned hi_bits = xhi != NULL ? *xhi : 0;

  if (size == 8)
    lo_bits = ~lo_bits & 0xff;
  else if (size == 16)
    lo_bits = ~lo_bits & 0xffff;
  else if (size == 32 || size == 64)
    {
      /* A 64-bit inversion also flips the high word.  */
      if (size == 64)
	hi_bits = ~hi_bits & 0xffffffff;
      lo_bits = ~lo_bits & 0xffffffff;
    }
  else
    abort ();

  if (xlo != NULL)
    *xlo = lo_bits;

  if (xhi != NULL)
    *xhi = hi_bits;
}
8608
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D, i.e. every byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned mask = 0xffu << (byte * 8);
      unsigned field = imm & mask;

      if (field != 0 && field != mask)
	return 0;
    }

  return 1;
}
8620
/* Collapse an immediate of the above form (each byte 0x00 or 0xff)
   to 0bABCD by collecting bit 0 of each byte, least significant
   byte first.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned squashed = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    squashed |= ((imm >> (byte * 8)) & 1u) << byte;

  return squashed;
}
8629
/* Compress quarter-float representation to 0b...000 abcdefgh:
   fold bit 31 down to bit 7 and bits [25:19] down to bits [6:0].  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low7 = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return low7 | sign;
}
8637
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-precision float immediate: cmode 0xf.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* All-zeros-or-all-ones bytes: the 64-bit immediate form, one
	 encoded bit per byte (cmode 0xe with op forced to 1).  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit value is only encodable when both halves
	 are identical; fall through and treat it as 32-bit.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* A single byte in any of the four byte positions...  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* ... or a byte above trailing ones (the "shifted ones" forms).  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Retry with 16-bit elements when the two halfwords repeat.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* A byte in either halfword position.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Retry with 8-bit elements when the two bytes repeat.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
8747
8748 #if defined BFD_HOST_64_BIT
/* Returns TRUE if double precision value V may be cast
   to single precision without loss of accuracy.  */

static bfd_boolean
is_double_a_single (bfd_int64_t v)
{
  /* IEEE binary64 layout: bits [62:52] biased exponent, [51:0] mantissa.  */
  int exp = (int)((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  /* Accept when the exponent is 0 (zero/denormal), all-ones (Inf/NaN),
     or within binary32's representable range, AND the 29 low-order
     mantissa bits a cast to single would discard are all zero.  */
  return (exp == 0 || exp == 0x7FF
	  || (exp >= 1023 - 126 && exp <= 1023 + 127))
	  && (mantissa & 0x1FFFFFFFl) == 0;
}
8762
8763 /* Returns a double precision value casted to single precision
8764 (ignoring the least significant bits in exponent and mantissa). */
8765
8766 static int
8767 double_to_single (bfd_int64_t v)
8768 {
8769 int sign = (int) ((v >> 63) & 1l);
8770 int exp = (int) ((v >> 52) & 0x7FF);
8771 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
8772
8773 if (exp == 0x7FF)
8774 exp = 0xFF;
8775 else
8776 {
8777 exp = exp - 1023 + 127;
8778 if (exp >= 0xFF)
8779 {
8780 /* Infinity. */
8781 exp = 0x7F;
8782 mantissa = 0;
8783 }
8784 else if (exp < 0)
8785 {
8786 /* No denormalized numbers. */
8787 exp = 0;
8788 mantissa = 0;
8789 }
8790 }
8791 mantissa >>= 29;
8792 return (sign << 31) | (exp << 23) | mantissa;
8793 }
8794 #endif /* BFD_HOST_64_BIT */
8795
/* Kind of literal being loaded by an "=expr" pseudo operation; used by
   move_or_literal_pool to choose the replacement instruction.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb-encoded load into a core register.  */
  CONST_ARM,	/* ARM-encoded load into a core register.  */
  CONST_VEC	/* Load into a VFP/Neon register.  */
};
8802
8803 static void do_vfp_nsyn_opcode (const char *);
8804
/* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p = (t == CONST_ARM);

  /* Locate the load bit for the encoding in use; 32-bit Thumb opcodes
     keep it in a different position from 16-bit ones.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* "=expr" only makes sense on load instructions.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op != O_constant
      && inst.relocs[0].exp.X_op != O_symbol
      && inst.relocs[0].exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op == O_constant
      || inst.relocs[0].exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.relocs[0].exp.X_op == O_big)
	{
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.relocs[0].exp.X_add_number == -1)
	    {
	      /* X_add_number of -1 marks a floating-point bignum:
		 convert it to integer littlenums first.  */
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

	  /* Assemble the low-order littlenums into a host integer.  */
#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	      | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.relocs[0].exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* LDR must not cause a flag-setting instruction to be
		 chosen, so we do not check whether MOVS can be used.  */

	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		   || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		  && inst.operands[i].reg != 13
		  && inst.operands[i].reg != 15)
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  /* Try the value directly, then its bitwise inverse
		     (to be loaded with MVN).  */
		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000 /* MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      /* In case this replacement is being done on Armv8-M
			 Baseline we need to make sure to disable the
			 instruction size check, as otherwise GAS will reject
			 the use of this T32 instruction.  */
		      inst.size_req = 0;
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      /* High word: explicit, or sign/zero-extended from the low
		 word depending on the expression's signedness.  */
	      unsigned immhi = inst.operands[1].regisimm
			       ? inst.operands[1].reg
			       : inst.relocs[0].exp.X_unsigned
				 ? 0
				 : ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      /* If the value itself is not encodable, try its bitwise
		 inverse with the op bit flipped (VMOV <-> VMVN).  */
	      if (cmode == FAIL)
		{
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
				     | (1 << 23)
				     | (cmode << 8)
				     | (op << 5)
				     | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move is possible: fall back to a PC-relative literal-pool load.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.relocs[0].pc_rel = 1;
  inst.relocs[0].type = (thumb_p
			 ? BFD_RELOC_ARM_THUMB_OFFSET
			 : (mode_3
			    ? BFD_RELOC_ARM_HWLITERAL
			    : BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
9054
9055 /* inst.operands[i] was set up by parse_address. Encode it into an
9056 ARM-format instruction. Reject all forms which cannot be encoded
9057 into a coprocessor load/store instruction. If wb_ok is false,
9058 reject use of writeback; if unind_ok is false, reject use of
9059 unindexed addressing. If reloc_override is not 0, use it instead
9060 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
9061 (in which case it is preserved). */
9062
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256: a non-register address is only acceptable when the
	 destination is a vector register, in which case the constant may
	 be placed in a literal pool instead.  */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  /* Base register goes into bits 16-19 (Rn).  */
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form: the immediate occupies the offset field and the
	 U bit is always set.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.relocs[0].type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.relocs[0].type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.relocs[0].type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.relocs[0].type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      /* Group relocations and LDR_PC_G0 are preserved as-is; anything
	 else gets the default coprocessor-offset relocation for the
	 current instruction set.  */
      if (thumb_mode)
	inst.relocs[0].type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.relocs[0].type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
9131
9132 /* Functions for instruction encoding, sorted by sub-architecture.
9133 First some generics; their names are taken from the conventional
9134 bit positions for register arguments in ARM format instructions. */
9135
/* Encoder for instructions that take no operands: the opcode bits from
   the opcode table are already complete, so nothing needs to be added.  */
static void
do_noargs (void)
{
}
9140
/* Encode operand 0 as Rd (bits 12-15).  */
static void
do_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
}
9146
/* Encode operand 0 as Rn (bits 16-19).  */
static void
do_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
}
9152
/* Encode operand 0 as Rd (bits 12-15) and operand 1 as Rm (bits 0-3).  */
static void
do_rd_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
}
9159
/* Encode operand 0 as Rm (bits 0-3) and operand 1 as Rn (bits 16-19).  */
static void
do_rm_rn (void)
{
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 16;
}
9166
/* Encode operand 0 as Rd (bits 12-15) and operand 1 as Rn (bits 16-19).  */
static void
do_rd_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}
9173
/* Encode operand 0 as Rn (bits 16-19) and operand 1 as Rd (bits 12-15).  */
static void
do_rn_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
}
9180
/* Encode operand 0 into bits 8-11 and operand 1 into bits 16-19
   (used for the TT family of instructions).  */
static void
do_tt (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
}
9187
9188 static bfd_boolean
9189 check_obsolete (const arm_feature_set *feature, const char *msg)
9190 {
9191 if (ARM_CPU_IS_ANY (cpu_variant))
9192 {
9193 as_tsktsk ("%s", msg);
9194 return TRUE;
9195 }
9196 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
9197 {
9198 as_bad ("%s", msg);
9199 return TRUE;
9200 }
9201
9202 return FALSE;
9203 }
9204
/* Encode Rd (bits 12-15), Rm (bits 0-3) and Rn (bits 16-19), with extra
   checks when the opcode is SWP/SWPB.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
9228
/* Encode Rd (bits 12-15), Rn (bits 16-19) and Rm (bits 0-3).  */
static void
do_rd_rn_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
}
9236
/* Encode Rm (bits 0-3), Rd (bits 12-15) and Rn (bits 16-19).  The base
   register (operand 2) may not be PC, and any offset expression parsed
   with the address must be zero.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  constraint (((inst.relocs[0].exp.X_op != O_constant
		&& inst.relocs[0].exp.X_op != O_illegal)
	       || inst.relocs[0].exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9249
/* Encode an immediate operand into the low bits of the instruction.  */
static void
do_imm0 (void)
{
  inst.instruction |= inst.operands[0].imm;
}
9255
/* Encode Rd (bits 12-15) followed by a coprocessor address operand,
   allowing both writeback and unindexed addressing.  */
static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
9262
9263 /* ARM instructions, in alphabetical order by function name (except
9264 that wrapper functions appear immediately after the function they
9265 wrap). */
9266
9267 /* This is a pseudo-op of the form "adr rd, label" to be converted
9268 into a relative address of the form "add rd, pc, #label-.-8". */
9269
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* Account for the PC read offset (PC reads as . + 8 in ARM state).  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* When interworking, taking the address of a defined Thumb function
     must produce an address with the low (Thumb) bit set.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
9288
9289 /* This is a pseudo-op of the form "adrl rd, label" to be converted
9290 into a relative address of the form:
9291 add rd, pc, #low(label-.-8)"
9292 add rd, rd, #high(label-.-8)" */
9293
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* ADRL expands to a two-instruction sequence.  */
  inst.size = INSN_SIZE * 2;
  /* Account for the PC read offset (PC reads as . + 8 in ARM state).  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* When interworking, taking the address of a defined Thumb function
     must produce an address with the low (Thumb) bit set.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
9313
/* Data-processing (arithmetic) instructions: Rd (bits 12-15), Rn
   (bits 16-19) and a shifter operand.  In the two-operand form Rn
   defaults to Rd.  */
static void
do_arit (void)
{
  /* The Thumb-1-only ALU relocations cannot be used in ARM state.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
9326
9327 static void
9328 do_barrier (void)
9329 {
9330 if (inst.operands[0].present)
9331 inst.instruction |= inst.operands[0].imm;
9332 else
9333 inst.instruction |= 0xf;
9334 }
9335
/* BFC Rd, #lsb, #width.  Operand 1 is the LSB, operand 2 the width.  */
static void
do_bfc (void)
{
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
9347
/* BFI Rd, Rm, #lsb, #width.  */
static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
9367
/* SBFX/UBFX Rd, Rm, #lsb, #width.  The encoding stores the LSB and
   (width - 1).  */
static void
do_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
}
9378
9379 /* ARM V5 breakpoint instruction (argument parse)
9380 BKPT <16 bit unsigned immediate>
9381 Instruction is not conditional.
9382 The bit pattern given in insns[] has the COND_ALWAYS condition,
9383 and it is an error if the caller tried to override that. */
9384
9385 static void
9386 do_bkpt (void)
9387 {
9388 /* Top 12 of 16 bits to bits 19:8. */
9389 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
9390
9391 /* Bottom 4 of 16 bits to bits 3:0. */
9392 inst.instruction |= inst.operands[0].imm & 0xf;
9393 }
9394
/* Choose the relocation for a branch target.  A '(plt)' or '(tlscall)'
   suffix on the operand overrides DEFAULT_RELOC; anything else on the
   operand is rejected.  Branch relocations are always pc-relative.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.relocs[0].type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.relocs[0].type = (bfd_reloc_code_real_type) default_reloc;
  inst.relocs[0].pc_rel = 1;
}
9411
/* B{cond}: on EABI version 4 or later objects use the JUMP relocation,
   otherwise the generic pc-relative branch relocation.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
9422
/* BL{cond}: on EABI version 4 or later objects, an unconditional BL gets
   the CALL relocation and a conditional one the JUMP relocation;
   otherwise the generic pc-relative branch relocation is used.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
9438
9439 /* ARM V5 branch-link-exchange instruction (argument parse)
9440 BLX <target_addr> ie BLX(1)
9441 BLX{<condition>} <Rm> ie BLX(2)
9442 Unfortunately, there are two different opcodes for this mnemonic.
9443 So, the insns[].value is not used, and the code here zaps values
9444 into inst.instruction.
9445 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
9446
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* BLX(1) has its own fixed encoding, replacing the table value.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
9470
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (!ARM_FEATURE_ZERO (selected_object_arch)
      && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* Pre-v4 EABI objects never get the relocation.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.relocs[0].type = BFD_RELOC_ARM_V4BX;
}
9495
9496
9497 /* ARM v5TEJ. Jump to Jazelle code. */
9498
/* BXJ: jump to Jazelle code through the register in bits 0-3.  */
static void
do_bxj (void)
{
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg;
}
9507
9508 /* Co-processor data operation:
9509 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
9510 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
static void
do_cdp (void)
{
  inst.instruction |= inst.operands[0].reg << 8;	/* Coprocessor number.  */
  inst.instruction |= inst.operands[1].imm << 20;	/* opcode_1.  */
  inst.instruction |= inst.operands[2].reg << 12;	/* CRd.  */
  inst.instruction |= inst.operands[3].reg << 16;	/* CRn.  */
  inst.instruction |= inst.operands[4].reg;		/* CRm.  */
  inst.instruction |= inst.operands[5].imm << 5;	/* opcode_2.  */
}
9521
/* Comparison instructions: Rn (bits 16-19) plus a shifter operand; no
   destination register.  */
static void
do_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);
}
9528
9529 /* Transfer between coprocessor and ARM registers.
9530 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
9531 MRC2
9532 MCR{cond}
9533 MCR2
9534
9535 No special properties. */
9536
/* Description of a single deprecated coprocessor register access: the
   coprocessor number and opcode/CR fields identifying the register, the
   feature sets in which accessing it is deprecated or obsoleted, and the
   diagnostic messages to issue in each case.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opc1 field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opc2 field.  */
  arm_feature_set deprecated;	/* Features for which access is deprecated.  */
  arm_feature_set obsoleted;	/* Features for which access is obsolete.  */
  const char *dep_msg;		/* Deprecation diagnostic.  */
  const char *obs_msg;		/* Obsolescence diagnostic.  */
};
9549
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  Each entry names the
   register via its cp/opc1/CRn/CRm/opc2 encoding; all current entries
   are deprecated (not obsoleted) as of ARMv8.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in the table above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
9577
/* MCR/MCR2/MRC/MRC2: move between an ARM register and a coprocessor
   register.  Checks register restrictions, warns about deprecated
   coprocessor registers, then packs the six operands into their fields.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn if the access matches an entry in the deprecated-register table
     (warnings are suppressed for the "any" CPU).  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  inst.instruction |= inst.operands[0].reg << 8;	/* Coprocessor number.  */
  inst.instruction |= inst.operands[1].imm << 21;	/* opcode_1.  */
  inst.instruction |= Rd << 12;				/* Rd.  */
  inst.instruction |= inst.operands[3].reg << 16;	/* CRn.  */
  inst.instruction |= inst.operands[4].reg;		/* CRm.  */
  inst.instruction |= inst.operands[5].imm << 5;	/* opcode_2.  */
}
9627
9628 /* Transfer between coprocessor register and pair of ARM registers.
9629 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
9630 MCRR2
9631 MRRC{cond}
9632 MRRC2
9633
9634 Two XScale instructions are special cases of these:
9635
9636 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
9637 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
9638
9639 Result unpredictable if Rd or Rn is R15. */
9640
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
       /* If Rd == Rn, error that the operation is
	  unpredictable (example MRRC p3,#1,r1,r1,c4).  */
       constraint (Rd == Rn, BAD_OVERLAP);
    }

  inst.instruction |= inst.operands[0].reg << 8;	/* Coprocessor number.  */
  inst.instruction |= inst.operands[1].imm << 4;	/* opcode.  */
  inst.instruction |= Rd << 12;				/* Rd / RdLo.  */
  inst.instruction |= Rn << 16;				/* Rn / RdHi.  */
  inst.instruction |= inst.operands[4].reg;		/* CRm.  */
}
9674
/* CPS: the effect/flags immediate goes into bits 6+; if a mode operand
   is present, set the M bit and encode the mode in the low bits.  */
static void
do_cpsi (void)
{
  inst.instruction |= inst.operands[0].imm << 6;
  if (inst.operands[1].present)
    {
      inst.instruction |= CPSI_MMOD;
      inst.instruction |= inst.operands[1].imm;
    }
}
9685
/* DBG: encode the option immediate in the low bits.  */
static void
do_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
9691
/* SDIV/UDIV: Rd in bits 16-19, Rn in bits 0-3, Rm in bits 8-11.  A
   missing Rn means the two-operand form, where Rn defaults to Rd.
   PC is not permitted in any position.  */
static void
do_div (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = (inst.operands[1].present
	? inst.operands[1].reg : Rd);
  Rm = inst.operands[2].reg;

  constraint ((Rd == REG_PC), BAD_PC);
  constraint ((Rn == REG_PC), BAD_PC);
  constraint ((Rm == REG_PC), BAD_PC);

  inst.instruction |= Rd << 16;
  inst.instruction |= Rn << 0;
  inst.instruction |= Rm << 8;
}
9710
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emits nothing in ARM state.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_pred_insn_type (IT_INSN);
      /* Record the condition mask (low four bits of the opcode) with a
	 marker bit above it, plus the base condition.  */
      now_pred.mask = (inst.instruction & 0xf) | 0x10;
      now_pred.cc = inst.operands[0].imm;
    }
}
9727
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* ffs () returns 0 for an empty mask, leaving I negative; shifting by
     a negative count is undefined behaviour, so reject that case before
     evaluating 1 << I.  */
  return (i < 0 || i > 15 || range != (1 << i)) ? -1 : i;
}
9736
/* Common encoder for LDM/STM and for PUSH/POP (FROM_PUSH_POP_MNEM
   non-zero).  Warns about UNPREDICTABLE writeback combinations, and for
   single-register PUSH/POP switches to the A2 (single-transfer)
   encoding.  */
static void
encode_ldmstm (int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A '^' suffix on the register list selects the type 2/3 form.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      if (is_push && one_reg == 13 /* SP */)
	/* PR 22483: The A2 encoding cannot be used when
	   pushing the stack pointer as this is UNPREDICTABLE.  */
	return;

      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
9792
/* LDM/STM written with the explicit mnemonics (not PUSH/POP).  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
9798
9799 /* ARMv5TE load-consecutive (argument parse)
9800 Mode is like LDRH.
9801
9802 LDRccD R, mode
9803 STRccD R, mode. */
9804
/* LDRD/STRD: transfer an even/odd pair of consecutive registers.  The
   second register operand is optional and defaults to Rt + 1.  */
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* r14 would make the pair r14/r15, and r15 cannot be transferred.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
9840
/* LDREX: exclusive load.  The address must be a plain [Rn] with no
   offset, writeback, shift or index register.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* No fixup is needed; the zero offset has been verified above.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
9872
/* LDREXD: exclusive load of an even/odd register pair; the second
   register operand is optional and defaults to Rt + 1.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9888
9889 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9890 which is not a multiple of four is UNPREDICTABLE. */
/* Reject 'ldr pc, [pc, #imm]' when the immediate offset is not a
   multiple of four (UNPREDICTABLE; see comment above).  */
static void
check_ldr_r15_aligned (void)
{
  constraint (!(inst.operands[1].immisreg)
	      && (inst.operands[0].reg == REG_PC
		  && inst.operands[1].reg == REG_PC
		  && (inst.relocs[0].exp.X_add_number & 0x3)),
	      _("ldr to register 15 must be 4-byte aligned"));
}
9900
9901 static void
9902 do_ldst (void)
9903 {
9904 inst.instruction |= inst.operands[0].reg << 12;
9905 if (!inst.operands[1].isreg)
9906 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
9907 return;
9908 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
9909 check_ldr_r15_aligned ();
9910 }
9911
/* LDRT/STRT (user-mode translation): always post-indexed, so rewrite a
   bare [Rn] as post-indexed with writeback and reject any real offset.  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
9930
9931 /* Halfword and signed-byte load/store operations. */
9932
9933 static void
9934 do_ldstv4 (void)
9935 {
9936 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
9937 inst.instruction |= inst.operands[0].reg << 12;
9938 if (!inst.operands[1].isreg)
9939 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
9940 return;
9941 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
9942 }
9943
/* LDRHT/LDRSBT/etc. (user-mode translation, addressing mode 3): always
   post-indexed, so rewrite a bare [Rn] as post-indexed with writeback
   and reject any real offset.  */
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9962
9963 /* Co-processor register load/store.
9964 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
static void
do_lstc (void)
{
  inst.instruction |= inst.operands[0].reg << 8;	/* Coprocessor number.  */
  inst.instruction |= inst.operands[1].reg << 12;	/* CRd.  */
  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9972
/* MLA/MLS: Rd in bits 16-19, Rm in bits 0-3, Rs in bits 8-11, Rn (the
   accumulator) in bits 12-15.  */
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
9987
/* MOV and friends: Rd in bits 12-15 plus a shifter operand.  */
static void
do_mov (void)
{
  /* The Thumb-1-only ALU relocations cannot be used in ARM state.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9997
9998 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 22 distinguishes MOVT (set) from MOVW (clear).  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.relocs[0].type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.relocs[0].type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      /* No relocation needed: encode the constant directly.  */
      imm = inst.relocs[0].exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
10019
/* Try to handle MRS written in VFP style.  An APSR-style destination
   (isvec) with FPSCR as the source becomes 'fmstat'; a system-register
   source becomes 'fmrx'.  Returns FAIL if this is not a VFP form.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* Clear the operands: fmstat is encoded with none.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
10038
10039 static int
10040 do_vfp_nsyn_msr (void)
10041 {
10042 if (inst.operands[0].isvec)
10043 do_vfp_nsyn_opcode ("fmxr");
10044 else
10045 return FAIL;
10046
10047 return SUCCESS;
10048 }
10049
/* VMRS: move from a VFP/MVE system register (operand 1) to an ARM
   register (operand 0).  Validates the system register against the
   selected architecture before encoding.  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  switch (inst.operands[1].reg)
    {
    /* MVFR2 is only valid for Armv8-A.  */
    case 5:
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));
      break;

    /* Check for new Armv8.1-M Mainline changes to <spec_reg>.  */
    case 1: /* fpscr.  */
      constraint (!(ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		    || ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _(BAD_FPU));
      break;

    case 14: /* fpcxt_ns.  */
    case 15: /* fpcxt_s.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main),
		  _("selected processor does not support instruction"));
      break;

    case  2: /* fpscr_nzcvqc.  */
    case 12: /* vpr.  */
    case 13: /* p0.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main)
		  || (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _("selected processor does not support instruction"));
      if (inst.operands[0].reg != 2
	  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	as_warn (_("accessing MVE system register without MVE is UNPREDICTABLE"));
      break;

    default:
      break;
    }

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
10110
/* Encode VMSR <spec_reg>, <Rt>: move from an Arm core register to a
   VFP/MVE system register.  operands[0] is the system register,
   operands[1] is Rt.  */

static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  switch (inst.operands[0].reg)
    {
    /* MVFR2 is only valid for Armv8-A.  */
    case 5:
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));
      break;

    /* Check for new Armv8.1-M Mainline changes to <spec_reg>.  */
    case 1: /* fpscr.  */
      constraint (!(ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		    || ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _(BAD_FPU));
      break;

    case 14: /* fpcxt_ns.  */
    case 15: /* fpcxt_s.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main),
		  _("selected processor does not support instruction"));
      break;

    case 2: /* fpscr_nzcvqc.  */
    case 12: /* vpr.  */
    case 13: /* p0.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main)
		  || (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _("selected processor does not support instruction"));
      /* For VMSR operand 0 is the system register, so this correctly
	 warns for vpr/p0 (i.e. any of this group other than
	 fpscr_nzcvqc) when MVE is absent.  */
      if (inst.operands[0].reg != 2
	  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	as_warn (_("accessing MVE system register without MVE is UNPREDICTABLE"));
      break;

    default:
      break;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
10166
/* Encode MRS Rd, <psr>.  Delegates to the VFP pseudo-MRS handler
   first; otherwise encodes a core-register read of CPSR/SPSR or a
   banked register.  */

static void
do_mrs (void)
{
  unsigned br;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      /* Banked register: the parser stores the encoding in .reg;
	 accept only values with the banked-register marker (bit 9) or
	 the 0xf0000 group mask fully set.  */
      br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
10195
/* Encode MSR.  Two possible forms:
   "{C|S}PSR_<field>, Rm",
   "{C|S}PSR_f, #expression".
   The immediate form defers the operand encoding to a
   BFD_RELOC_ARM_IMMEDIATE fixup.  */

static void
do_msr (void)
{
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* Field mask and SPSR/CPSR selector come pre-encoded from the
     parser.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      inst.instruction |= INST_IMMEDIATE;
      inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
      inst.relocs[0].pc_rel = 0;
    }
}
10216
/* Encode MUL Rd, Rm {, Rs}.  If Rs is omitted the destination register
   doubles as the multiplier (MUL Rd, Rm == MUL Rd, Rm, Rd).  */

static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Rd == Rm was UNPREDICTABLE before Armv6; warn but assemble.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
10232
/* Long Multiply Parser
   UMULL RdLo, RdHi, Rm, Rs
   SMULL RdLo, RdHi, Rm, Rs
   UMLAL RdLo, RdHi, Rm, Rs
   SMLAL RdLo, RdHi, Rm, Rs.
   RdLo goes in bits 12-15, RdHi in 16-19, Rm in 0-3, Rs in 8-11.  */

static void
do_mull (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
10257
/* Encode NOP and the hint variants (NOP #imm).  On v6k and later, or
   when an explicit hint immediate is given, emit the architectural
   hint encoding; otherwise leave the legacy mov-based NOP opcode
   untouched.  */

static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* Keep only the condition.  */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
10271
/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
   Condition defaults to COND_ALWAYS.
   Error if Rd, Rn or Rm are R15.
   Rd in bits 12-15, Rn in 16-19, Rm in 0-3; optional shift encoded by
   encode_arm_shift.  */

static void
do_pkhbt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  if (inst.operands[3].present)
    encode_arm_shift (3);
}
10286
/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  Note the swapped Rn/Rm fields: PKHTB
	 Rd, Rn, Rm with no shift is the same operation as PKHBT
	 Rd, Rm, Rn.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
10309
/* ARMv5TE: Preload-Cache
   MP Extensions: Preload for write

   PLD(W) <addr_mode>

   Syntactically, like LDR with B=1, W=0, L=1.
   Only offset addressing is architecturally valid, hence the
   post-index/writeback/unindexed rejections below.  */

static void
do_pld (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
10330
/* ARMv7: PLI <addr_mode>
   Same addressing-mode restrictions as PLD; the PRE_INDEX bit is
   cleared afterwards because PLI's fixed encoding has P == 0.  */

static void
do_pli (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  inst.instruction &= ~PRE_INDEX;
}
10346
/* Encode PUSH/POP {reglist} by rewriting the operands into the
   equivalent STMDB/LDMIA SP!, {reglist} form and delegating to
   encode_ldmstm.  */

static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Shift the register list to operand 1 and synthesize SP! as the
     base register operand.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
10359
/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
   word at the specified address and the following word
   respectively.
   Unconditionally executed.
   Error if Rn is R15.
   Rn goes in bits 16-19; '!' sets the writeback (W) bit.  */

static void
do_rfe (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
10373
/* ARM V6 ssat (argument parse).
   SSAT's saturate-to bit position is encoded as imm - 1 (valid range
   1..32 maps to field values 0..31).  */

static void
do_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 usat (argument parse).
   Unlike SSAT, USAT encodes the saturate position directly
   (range 0..31).  */

static void
do_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
10399
/* ARM V6 ssat16 (argument parse).
   As with SSAT, the bit position is encoded as imm - 1.  */

static void
do_ssat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;
}

/* ARM V6 usat16: saturate position encoded directly.  */

static void
do_usat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;
}
10417
/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   preserving the other bits.

   setend <endian_specifier>, where <endian_specifier> is either
   BE or LE.  operands[0].imm is nonzero for BE, selecting bit 9
   (the E field) in the encoding.  */

static void
do_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
10434
/* Encode the shift pseudo-ops LSL/LSR/ASR/ROR Rd {, Rm}, Rs|#imm.
   With two operands the destination is also the shifted source.
   Register-specified shifts set SHIFT_BY_REG; immediate shifts are
   completed later via a BFD_RELOC_ARM_SHIFT_IMM fixup.  */

static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)	/* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
}
10455
/* Encode SMC #imm4 (Secure Monitor Call).  The immediate is emitted
   through a BFD_RELOC_ARM_SMC fixup; only its range is checked here.  */

static void
do_smc (void)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  constraint (value > 0xf, _("immediate too large (bigger than 0xF)"));

  inst.relocs[0].type = BFD_RELOC_ARM_SMC;
  inst.relocs[0].pc_rel = 0;
}
10465
/* Encode HVC #imm16 (Hypervisor Call); the immediate is applied by the
   BFD_RELOC_ARM_HVC fixup.  */

static void
do_hvc (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_HVC;
  inst.relocs[0].pc_rel = 0;
}

/* Encode SVC/SWI; the comment immediate is applied by the
   BFD_RELOC_ARM_SWI fixup.  */

static void
do_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
  inst.relocs[0].pc_rel = 0;
}
10479
/* Encode the Arm-state SETPAN #imm1: the PAN value goes in bit 9.  */

static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}

/* Encode the Thumb-state SETPAN #imm1: the PAN value goes in bit 3.  */

static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
10497
10498 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
10499 SMLAxy{cond} Rd,Rm,Rs,Rn
10500 SMLAWy{cond} Rd,Rm,Rs,Rn
10501 Error if any register is R15. */
10502
10503 static void
10504 do_smla (void)
10505 {
10506 inst.instruction |= inst.operands[0].reg << 16;
10507 inst.instruction |= inst.operands[1].reg;
10508 inst.instruction |= inst.operands[2].reg << 8;
10509 inst.instruction |= inst.operands[3].reg << 12;
10510 }
10511
/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
   Error if any register is R15.
   Warning if Rdlo == Rdhi.
   RdLo in bits 12-15, RdHi in 16-19, Rm in 0-3, Rs in 8-11.  */

static void
do_smlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
10528
10529 /* ARM V5E (El Segundo) signed-multiply (argument parse)
10530 SMULxy{cond} Rd,Rm,Rs
10531 Error if any register is R15. */
10532
10533 static void
10534 do_smul (void)
10535 {
10536 inst.instruction |= inst.operands[0].reg << 16;
10537 inst.instruction |= inst.operands[1].reg;
10538 inst.instruction |= inst.operands[2].reg << 8;
10539 }
10540
/* ARM V6 srs (argument parse).  The variable fields in the encoding are
   the same for both ARM and Thumb-2.  The base register (if given)
   must be SP; the mode number goes in the low bits and '!' on either
   operand sets writeback.  */

static void
do_srs (void)
{
  int reg;

  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
10562
/* ARM V6 strex (argument parse).
   STREX Rd, Rt, [Rn]: only a plain register-indirect address with a
   zero offset is valid in the ARM encoding, and Rd must not overlap
   Rt or Rn.  */

static void
do_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  /* No fixup needed; the zero offset was verified above.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
10588
/* Thumb STREXB/STREXH: same addressing and overlap restrictions as
   do_strex, then the common Rm/Rd/Rn field layout.  */

static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
10603
/* ARM STREXD Rd, Rt, Rt2, [Rn]: Rt must be even and Rt2 (when written
   explicitly) must be Rt + 1; Rd must not overlap the pair or Rn.  */

static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
10625
/* ARM V8 store-release exclusive (STLEX family).  The status register
   must not overlap the data or address registers; field placement is
   handled by the shared Rd/Rm/Rn helper.  */
static void
do_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}

/* Thumb encoding of the same STLEX family (different field layout).  */

static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
10644
/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
   extends it to 32-bits, and adds the result to a value in another
   register.  You can specify a rotation by 0, 8, 16, or 24 bits
   before extracting the 16-bit value.
   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.
   The rotation amount is pre-scaled by the parser; shifting it left
   by 10 places it in the two-bit "rotate" field (bits 10-11).  */

static void
do_sxtah (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 10;
}

/* ARM V6 SXTH.

   SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.
   Same rotate-field placement as SXTAH but with no addend register.  */

static void
do_sxth (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 10;
}
10675 \f
/* VFP instructions.  In a logical order: SP variant first, monad
   before dyad, arithmetic then move then load/store.  */

/* Single-precision one-operand VFP op: Sd <- op Sm.  */

static void
do_vfp_sp_monadic (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
10689
/* Single-precision two-operand VFP op: Sd <- Sn op Sm.  */

static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
10697
/* Single-precision compare against zero: only Sd is encoded.  */

static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}

/* Conversion single -> double: Dd <- Sm.  */

static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Conversion double -> single: Sd <- Dm.  */

static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
10717
/* Core register from single-precision register (fmrs): Rd <- Sn.  */

static void
do_vfp_reg_from_sp (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}

/* Two core registers from a consecutive single-precision pair
   (fmrrs): Rd, Rn <- {Sm, Sm+1}.  */

static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
10738
/* Single-precision register from core register (fmsr): Sn <- Rd.  */

static void
do_vfp_sp_from_reg (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}

/* Consecutive single-precision pair from two core registers
   (fmsrr): {Sm, Sm+1} <- Rd, Rn.  */

static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
10759
/* Single-precision load/store (flds/fsts): Sd plus a coprocessor
   addressing mode.  */

static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}

/* Double-precision load/store (fldd/fstd).  */

static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
10773
10774
/* Common encoder for single-precision load/store multiple.
   LDSTM_TYPE selects the increment/decrement addressing variant; only
   the IA form is valid without base-register writeback.  The register
   count is in operands[1].imm.  */

static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}
10787
/* Common encoder for double-precision load/store multiple.  The
   transfer count is in words, so the D-register count is doubled; the
   FLDMX/FSTMX ("X") variants transfer one extra word.  */

static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
10808
/* Entry points for the VFP load/store-multiple family: each selects
   the addressing variant for the shared encoders above.  The "xp"
   forms are the unknown-precision FLDMX/FSTMX variants.  */

static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
10844
/* Double-precision Dd <- op Dm.  */

static void
do_vfp_dp_rd_rm (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

/* Double-precision: first operand in the Dn field, second in Dd.  */

static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

/* Double-precision: first operand in the Dd field, second in Dn.  */

static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
10869
/* Double-precision three-operand op: Dd <- Dn op Dm.  */

static void
do_vfp_dp_rd_rn_rm (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

/* Double-precision single-register op: only the Dd field.  */

static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
10887
/* Double-precision three-operand op with operands in Dm, Dd, Dn field
   order.  */

static void
do_vfp_dp_rm_rd_rn (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
10899
/* VFPv3 instructions.  */

/* VMOV.F32 Sd, #imm — the 8-bit modified-immediate is split into the
   high nibble (bits 16-19) and low nibble (bits 0-3).  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

/* VMOV.F64 Dd, #imm — same immediate split as the SP form.  */

static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
10916
/* Encode the fraction-bits field for fixed-point VCVT.  SRCSIZE is the
   width of the fixed-point format (16 or 32); operands[1].imm is the
   programmer-supplied fracbits.  The encoded value is
   SRCSIZE - fracbits, stored with its LSB in bit 5 and the remaining
   bits in bits 0-3.  */

static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
10940
/* Fixed-point VCVT entry points: encode the destination register, then
   the fraction-bits field for the given fixed-point width.  */

static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
10968 \f
10969 /* FPA instructions. Also in a logical order. */
10970
10971 static void
10972 do_fpa_cmp (void)
10973 {
10974 inst.instruction |= inst.operands[0].reg << 16;
10975 inst.instruction |= inst.operands[1].reg;
10976 }
10977
/* FPA LFM/SFM load/store multiple.  The register count (1-4) is
   encoded in the CP_T_X/CP_T_Y bits; the "ea"/"fd" stack-style forms
   are emulated with explicit offsets because the hardware has no real
   stacking modes.  */

static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register transfer is 12 bytes.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.relocs[0].exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.relocs[0].exp.X_add_number = -inst.relocs[0].exp.X_add_number;

      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
11016 \f
/* iWMMXt instructions: strictly in alphabetical order.  */

/* TANDC/TORC etc.: the only legal destination is r15 (the flags).  */

static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}

/* TEXTRC: wRd implied; core destination in bits 12-15, lane select in
   the low bits.  */

static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}
11031
/* TEXTRM: extract an element from wRn into core register Rd.  */

static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}

/* TINSR: insert core register Rd into an element of wRd.  */

static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}

/* TMIA: multiply-accumulate into accumulator acc (bits 5-7) from two
   core registers.  */

static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
11055
/* WALIGNI: align-extract with a 3-bit immediate in bits 20-22.  */

static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}

/* WMERGE: like WALIGNI but the immediate field starts at bit 21.  */

static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}

static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
11082
/* WLDRB/WLDRH/WSTRB/WSTRH: byte/halfword load-store.  The offset is
   scaled by 2, hence the _S2 coprocessor-offset relocations.  */

static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}
11094
/* WLDRW/WSTRW: word load-store, covering both wRd and control
   registers.  */

static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register transfers use the unconditional (0xf)
	 encoding, so no condition may be attached.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
11108
/* WLDRD/WSTRD: doubleword load-store.  iWMMXt2 adds a
   register-offset form, which needs the instruction re-encoded by
   hand; otherwise use the normal coprocessor addressing.  */

static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Clear the coprocessor addressing fields and switch to the
	 unconditional iWMMXt2 register-offset encoding.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
11131
/* WSHUFH: the 8-bit shuffle selector is split, high nibble into
   bits 20-23 and low nibble into bits 0-3.  */

static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}
11140
11141 static void
11142 do_iwmmxt_wzero (void)
11143 {
11144 /* WZERO reg is an alias for WANDN reg, reg, reg. */
11145 inst.instruction |= inst.operands[0].reg;
11146 inst.instruction |= inst.operands[0].reg << 12;
11147 inst.instruction |= inst.operands[0].reg << 16;
11148 }
11149
/* iWMMXt shift/rotate family: either the three-register form, or (on
   iWMMXt2 only) a 5-bit immediate form.  A #0 shift amount is not
   encodable directly, so it is rewritten as an equivalent full-width
   rotate (or a plain register move for the doubleword cases).  */

static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Bits 20-23 identify the operation size/kind; remap the
	   zero-amount case per size class.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
11199 \f
11200 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
11201 operations first, then control, shift, and load/store. */
11202
11203 /* Insns like "foo X,Y,Z". */
11204
11205 static void
11206 do_mav_triple (void)
11207 {
11208 inst.instruction |= inst.operands[0].reg << 16;
11209 inst.instruction |= inst.operands[1].reg;
11210 inst.instruction |= inst.operands[2].reg << 12;
11211 }
11212
11213 /* Insns like "foo W,X,Y,Z".
11214 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
11215
11216 static void
11217 do_mav_quad (void)
11218 {
11219 inst.instruction |= inst.operands[0].reg << 5;
11220 inst.instruction |= inst.operands[1].reg << 12;
11221 inst.instruction |= inst.operands[2].reg << 16;
11222 inst.instruction |= inst.operands[3].reg;
11223 }
11224
/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
static void
do_mav_dspsc (void)
{
  /* Only the MVDX source register is encoded; the DSPSC destination
     (operand 0) is implicit in the base opcode.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
11231
11232 /* Maverick shift immediate instructions.
11233 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
11234 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
11235
11236 static void
11237 do_mav_shift (void)
11238 {
11239 int imm = inst.operands[2].imm;
11240
11241 inst.instruction |= inst.operands[0].reg << 12;
11242 inst.instruction |= inst.operands[1].reg << 16;
11243
11244 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
11245 Bits 5-7 of the insn should have bits 4-6 of the immediate.
11246 Bit 4 should be 0. */
11247 imm = (imm & 0xf) | ((imm & 0x70) << 1);
11248
11249 inst.instruction |= imm;
11250 }
11251 \f
11252 /* XScale instructions. Also sorted arithmetic before move. */
11253
11254 /* Xscale multiply-accumulate (argument parse)
11255 MIAcc acc0,Rm,Rs
11256 MIAPHcc acc0,Rm,Rs
11257 MIAxycc acc0,Rm,Rs. */
11258
11259 static void
11260 do_xsc_mia (void)
11261 {
11262 inst.instruction |= inst.operands[1].reg;
11263 inst.instruction |= inst.operands[2].reg << 12;
11264 }
11265
11266 /* Xscale move-accumulator-register (argument parse)
11267
11268 MARcc acc0,RdLo,RdHi. */
11269
11270 static void
11271 do_xsc_mar (void)
11272 {
11273 inst.instruction |= inst.operands[1].reg << 12;
11274 inst.instruction |= inst.operands[2].reg << 16;
11275 }
11276
11277 /* Xscale move-register-accumulator (argument parse)
11278
11279 MRAcc RdLo,RdHi,acc0. */
11280
11281 static void
11282 do_xsc_mra (void)
11283 {
11284 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
11285 inst.instruction |= inst.operands[0].reg << 12;
11286 inst.instruction |= inst.operands[1].reg << 16;
11287 }
11288 \f
11289 /* Encoding functions relevant only to Thumb. */
11290
11291 /* inst.operands[i] is a shifted-register operand; encode
11292 it into inst.instruction in the format used by Thumb32. */
11293
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      /* A shift of exactly 32 is representable only for LSR/ASR
	 (encoded as amount 0); LSL and ROR allow at most 31.  */
      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      /* Shift amount is split: bits 4-2 of the amount go into
	 insn bits 14-12 (imm3), bits 1-0 into insn bits 7-6 (imm2).  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
11325
11326
11327 /* inst.operands[i] was set up by parse_address. Encode it into a
11328 Thumb32 format load or store instruction. Reject forms that cannot
11329 be used with such instructions. If is_t is true, reject forms that
11330 cannot be used with a T instruction; if is_d is true, reject forms
11331 that cannot be used with a D instruction. If it is a store insn,
11332 reject PC in Rn. */
11333
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register goes in the Rn field.  */
  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #shift}].  Only a
	 pre-indexed, non-negative, non-writeback LSL form exists.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  /* LSL amount is limited to 0-3 and goes in bits 5-4.  */
	  constraint (inst.relocs[0].exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.relocs[0].exp.X_add_number < 0
		      || inst.relocs[0].exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
	}
      /* No immediate to fix up later.  */
      inst.relocs[0].type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Immediate pre-indexed form: [Rn, #off]{!}.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      /* The offset immediate is resolved by the fixup machinery.  */
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Immediate post-indexed form: [Rn], #off — always writes back.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
11405
11406 /* Table of Thumb instructions which exist in 16- and/or 32-bit
11407 encodings (the latter only in post-V6T2 cores). The index is the
11408 value used in the insns table below. When there is more than one
11409 possible 16-bit encoding for the instruction, this table always
11410 holds variant (1).
11411 Also contains several pseudo-instructions used during relaxation. */
#define T16_32_TAB \
  X(_adc, 4140, eb400000), \
  X(_adcs, 4140, eb500000), \
  X(_add, 1c00, eb000000), \
  X(_adds, 1c00, eb100000), \
  X(_addi, 0000, f1000000), \
  X(_addis, 0000, f1100000), \
  X(_add_pc,000f, f20f0000), \
  X(_add_sp,000d, f10d0000), \
  X(_adr, 000f, f20f0000), \
  X(_and, 4000, ea000000), \
  X(_ands, 4000, ea100000), \
  X(_asr, 1000, fa40f000), \
  X(_asrs, 1000, fa50f000), \
  X(_b, e000, f000b000), \
  X(_bcond, d000, f0008000), \
  X(_bf, 0000, f040e001), \
  X(_bfcsel,0000, f000e001), \
  X(_bfx, 0000, f060e001), \
  X(_bfl, 0000, f000c001), \
  X(_bflx, 0000, f070e001), \
  X(_bic, 4380, ea200000), \
  X(_bics, 4380, ea300000), \
  X(_cinc, 0000, ea509000), \
  X(_cinv, 0000, ea50a000), \
  X(_cmn, 42c0, eb100f00), \
  X(_cmp, 2800, ebb00f00), \
  X(_cneg, 0000, ea50b000), \
  X(_cpsie, b660, f3af8400), \
  X(_cpsid, b670, f3af8600), \
  X(_cpy, 4600, ea4f0000), \
  X(_csel, 0000, ea508000), \
  X(_cset, 0000, ea5f900f), \
  X(_csetm, 0000, ea5fa00f), \
  X(_csinc, 0000, ea509000), \
  X(_csinv, 0000, ea50a000), \
  X(_csneg, 0000, ea50b000), \
  X(_dec_sp,80dd, f1ad0d00), \
  X(_dls, 0000, f040e001), \
  X(_dlstp, 0000, f000e001), \
  X(_eor, 4040, ea800000), \
  X(_eors, 4040, ea900000), \
  X(_inc_sp,00dd, f10d0d00), \
  X(_lctp, 0000, f00fe001), \
  X(_ldmia, c800, e8900000), \
  X(_ldr, 6800, f8500000), \
  X(_ldrb, 7800, f8100000), \
  X(_ldrh, 8800, f8300000), \
  X(_ldrsb, 5600, f9100000), \
  X(_ldrsh, 5e00, f9300000), \
  X(_ldr_pc,4800, f85f0000), \
  X(_ldr_pc2,4800, f85f0000), \
  X(_ldr_sp,9800, f85d0000), \
  X(_le, 0000, f00fc001), \
  X(_letp, 0000, f01fc001), \
  X(_lsl, 0000, fa00f000), \
  X(_lsls, 0000, fa10f000), \
  X(_lsr, 0800, fa20f000), \
  X(_lsrs, 0800, fa30f000), \
  X(_mov, 2000, ea4f0000), \
  X(_movs, 2000, ea5f0000), \
  X(_mul, 4340, fb00f000), \
  X(_muls, 4340, ffffffff), /* no 32b muls */ \
  X(_mvn, 43c0, ea6f0000), \
  X(_mvns, 43c0, ea7f0000), \
  X(_neg, 4240, f1c00000), /* rsb #0 */ \
  X(_negs, 4240, f1d00000), /* rsbs #0 */ \
  X(_orr, 4300, ea400000), \
  X(_orrs, 4300, ea500000), \
  X(_pop, bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push, b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev, ba00, fa90f080), \
  X(_rev16, ba40, fa90f090), \
  X(_revsh, bac0, fa90f0b0), \
  X(_ror, 41c0, fa60f000), \
  X(_rors, 41c0, fa70f000), \
  X(_sbc, 4180, eb600000), \
  X(_sbcs, 4180, eb700000), \
  X(_stmia, c000, e8800000), \
  X(_str, 6000, f8400000), \
  X(_strb, 7000, f8000000), \
  X(_strh, 8000, f8200000), \
  X(_str_sp,9000, f84d0000), \
  X(_sub, 1e00, eba00000), \
  X(_subs, 1e00, ebb00000), \
  X(_subi, 8000, f1a00000), \
  X(_subis, 8000, f1b00000), \
  X(_sxtb, b240, fa4ff080), \
  X(_sxth, b200, fa0ff080), \
  X(_tst, 4200, ea100f00), \
  X(_uxtb, b2c0, fa5ff080), \
  X(_uxth, b280, fa1ff080), \
  X(_nop, bf00, f3af8000), \
  X(_yield, bf10, f3af8001), \
  X(_wfe, bf20, f3af8002), \
  X(_wfi, bf30, f3af8003), \
  X(_wls, 0000, f040c001), \
  X(_wlstp, 0000, f000c001), \
  X(_sev, bf40, f3af8004), \
  X(_sevl, bf50, f3af8005), \
  X(_udf, de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* First expansion: build the T_MNEM_* mnemonic-code enumeration.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Second expansion: table of 16-bit opcodes, indexed by mnemonic code
   relative to T16_32_OFFSET + 1.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Third expansion: table of 32-bit opcodes, same indexing.  Bit 20 of
   a 32-bit opcode is the S (flag-setting) bit, tested by
   THUMB_SETS_FLAGS.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
11532
11533 /* Thumb instruction encoders, in alphabetical order. */
11534
11535 /* ADDW or SUBW. */
11536
11537 static void
11538 do_t_add_sub_w (void)
11539 {
11540 int Rd, Rn;
11541
11542 Rd = inst.operands[0].reg;
11543 Rn = inst.operands[1].reg;
11544
11545 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
11546 is the SP-{plus,minus}-immediate form of the instruction. */
11547 if (Rn == REG_SP)
11548 constraint (Rd == REG_PC, BAD_PC);
11549 else
11550 reject_bad_reg (Rd);
11551
11552 inst.instruction |= (Rn << 16) | (Rd << 8);
11553 inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
11554 }
11555
11556 /* Parse an add or subtract instruction. We get here with inst.instruction
11557 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
11558
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* Does this mnemonic set the condition flags?  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* The 16-bit encodings set flags exactly when outside an IT
	 block, so a narrow encoding is only a candidate when that
	 matches the requested behaviour.  */
      if (flags)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate operand.  */
	  int add;

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  /* A narrow candidate was found.  Unless a Thumb-1
		     ALU_ABS group reloc forces the 16-bit form, either
		     fix it as 16-bit or mark it for relaxation.  */
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || (inst.relocs[0].type
			  > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC))
		    {
		      if (inst.size_req == 2)
			inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
		      else
			inst.relax = opcode;
		    }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* Generate a 32-bit encoding.  */
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return form SUBS PC, LR, #const
		     is permitted with PC as destination.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.relocs[0].exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.relocs[0].exp.X_add_number < 0
			      || inst.relocs[0].exp.X_add_number > 0xff,
			     _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.relocs[0].exp.X_add_number;
		  inst.relocs[0].type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register (possibly shifted) operand.  */
	  unsigned int value = inst.relocs[0].exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (classic Thumb-1) syntax.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
11779
/* Encode Thumb ADR.  Chooses between a relaxable 16-bit form, a
   32-bit ADD-PC-relative form, and a forced 16-bit form.  */
static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.relocs[0].pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
      inst.relocs[0].exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.relocs[0].pc_rel = 1;
      inst.instruction |= Rd << 4;
    }

  /* If the target is a defined Thumb function, add one so the
     computed address keeps the Thumb (bit 0) marker.  */
  if (inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number += 1;
}
11819
11820 /* Arithmetic instructions for which there is just one 16-bit
11821 instruction encoding, and it allows only two low registers.
11822 For maximal compatibility with ARM syntax, we allow three register
11823 operands even when Thumb-32 instructions are not available, as long
11824 as the first two are identical. For instance, both "sbc r0,r1" and
11825 "sbc r0,r0,r1" are allowed. */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.
	     The narrow forms set flags exactly when outside an IT
	     block, so they are only usable when that matches.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* Narrow form requires the destination and first source to
	     be the same register.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
11908
11909 /* Similarly, but for instructions where the arithmetic operation is
11910 commutative, so we can allow either of them to be different from
11911 the destination operand in a 16-bit instruction. For instance, all
11912 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11913 accepted. */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.
	     The narrow forms set flags exactly when outside an IT
	     block, so they are only usable when that matches.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* The operation is commutative: the narrow two-register
		 form works when the destination matches either source.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
12009
12010 static void
12011 do_t_bfc (void)
12012 {
12013 unsigned Rd;
12014 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
12015 constraint (msb > 32, _("bit-field extends past end of register"));
12016 /* The instruction encoding stores the LSB and MSB,
12017 not the LSB and width. */
12018 Rd = inst.operands[0].reg;
12019 reject_bad_reg (Rd);
12020 inst.instruction |= Rd << 8;
12021 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
12022 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
12023 inst.instruction |= msb - 1;
12024 }
12025
12026 static void
12027 do_t_bfi (void)
12028 {
12029 int Rd, Rn;
12030 unsigned int msb;
12031
12032 Rd = inst.operands[0].reg;
12033 reject_bad_reg (Rd);
12034
12035 /* #0 in second position is alternative syntax for bfc, which is
12036 the same instruction but with REG_PC in the Rm field. */
12037 if (!inst.operands[1].isreg)
12038 Rn = REG_PC;
12039 else
12040 {
12041 Rn = inst.operands[1].reg;
12042 reject_bad_reg (Rn);
12043 }
12044
12045 msb = inst.operands[2].imm + inst.operands[3].imm;
12046 constraint (msb > 32, _("bit-field extends past end of register"));
12047 /* The instruction encoding stores the LSB and MSB,
12048 not the LSB and width. */
12049 inst.instruction |= Rd << 8;
12050 inst.instruction |= Rn << 16;
12051 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
12052 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
12053 inst.instruction |= msb - 1;
12054 }
12055
12056 static void
12057 do_t_bfx (void)
12058 {
12059 unsigned Rd, Rn;
12060
12061 Rd = inst.operands[0].reg;
12062 Rn = inst.operands[1].reg;
12063
12064 reject_bad_reg (Rd);
12065 reject_bad_reg (Rn);
12066
12067 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
12068 _("bit-field extends past end of register"));
12069 inst.instruction |= Rd << 8;
12070 inst.instruction |= Rn << 16;
12071 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
12072 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
12073 inst.instruction |= inst.operands[3].imm - 1;
12074 }
12075
12076 /* ARM V5 Thumb BLX (argument parse)
12077 BLX <target_addr> which is BLX(1)
12078 BLX <Rm> which is BLX(2)
12079 Unfortunately, there are two different opcodes for this mnemonic.
12080 So, the insns[].value is not used, and the code here zaps values
12081 into inst.instruction.
12082
12083 ??? How to take advantage of the additional two bits of displacement
12084 available in Thumb32 mode? Need new relocation? */
12085
12086 static void
12087 do_t_blx (void)
12088 {
12089 set_pred_insn_type_last ();
12090
12091 if (inst.operands[0].isreg)
12092 {
12093 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
12094 /* We have a register, so this is BLX(2). */
12095 inst.instruction |= inst.operands[0].reg << 3;
12096 }
12097 else
12098 {
12099 /* No register. This must be BLX(1). */
12100 inst.instruction = 0xf000e800;
12101 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
12102 }
12103 }
12104
12105 static void
12106 do_t_branch (void)
12107 {
12108 int opcode;
12109 int cond;
12110 bfd_reloc_code_real_type reloc;
12111
12112 cond = inst.cond;
12113 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN);
12114
12115 if (in_pred_block ())
12116 {
12117 /* Conditional branches inside IT blocks are encoded as unconditional
12118 branches. */
12119 cond = COND_ALWAYS;
12120 }
12121 else
12122 cond = inst.cond;
12123
12124 if (cond != COND_ALWAYS)
12125 opcode = T_MNEM_bcond;
12126 else
12127 opcode = inst.instruction;
12128
12129 if (unified_syntax
12130 && (inst.size_req == 4
12131 || (inst.size_req != 2
12132 && (inst.operands[0].hasreloc
12133 || inst.relocs[0].exp.X_op == O_constant))))
12134 {
12135 inst.instruction = THUMB_OP32(opcode);
12136 if (cond == COND_ALWAYS)
12137 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
12138 else
12139 {
12140 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
12141 _("selected architecture does not support "
12142 "wide conditional branch instruction"));
12143
12144 gas_assert (cond != 0xF);
12145 inst.instruction |= cond << 22;
12146 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
12147 }
12148 }
12149 else
12150 {
12151 inst.instruction = THUMB_OP16(opcode);
12152 if (cond == COND_ALWAYS)
12153 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
12154 else
12155 {
12156 inst.instruction |= cond << 8;
12157 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
12158 }
12159 /* Allow section relaxation. */
12160 if (unified_syntax && inst.size_req != 2)
12161 inst.relax = opcode;
12162 }
12163 inst.relocs[0].type = reloc;
12164 inst.relocs[0].pc_rel = 1;
12165 }
12166
12167 /* Actually do the work for Thumb state bkpt and hlt. The only difference
12168 between the two is the maximum immediate allowed - which is passed in
12169 RANGE. */
12170 static void
12171 do_t_bkpt_hlt1 (int range)
12172 {
12173 constraint (inst.cond != COND_ALWAYS,
12174 _("instruction is always unconditional"));
12175 if (inst.operands[0].present)
12176 {
12177 constraint (inst.operands[0].imm > range,
12178 _("immediate value out of range"));
12179 inst.instruction |= inst.operands[0].imm;
12180 }
12181
12182 set_pred_insn_type (NEUTRAL_IT_INSN);
12183 }
12184
/* Thumb HLT: immediate restricted to 0..63.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
12190
/* Thumb BKPT: immediate restricted to 0..255.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
12196
/* Thumb BL/BLX to an immediate target: encode as a 23-bit PC-relative
   branch.  Undoes PLT reloc conversion (see comment below), and on COFF
   redirects calls to the Thumb entry point of ARM-interworked
   functions.  */
static void
do_t_branch23 (void)
{
  set_pred_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.relocs[0].type == BFD_RELOC_ARM_PLT32)
    inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_symbol
      = find_real_start (inst.relocs[0].exp.X_add_symbol);
#endif
}
12224
12225 static void
12226 do_t_bx (void)
12227 {
12228 set_pred_insn_type_last ();
12229 inst.instruction |= inst.operands[0].reg << 3;
12230 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
12231 should cause the alignment to be checked once it is known. This is
12232 because BX PC only works if the instruction is word aligned. */
12233 }
12234
12235 static void
12236 do_t_bxj (void)
12237 {
12238 int Rm;
12239
12240 set_pred_insn_type_last ();
12241 Rm = inst.operands[0].reg;
12242 reject_bad_reg (Rm);
12243 inst.instruction |= Rm << 16;
12244 }
12245
12246 static void
12247 do_t_clz (void)
12248 {
12249 unsigned Rd;
12250 unsigned Rm;
12251
12252 Rd = inst.operands[0].reg;
12253 Rm = inst.operands[1].reg;
12254
12255 reject_bad_reg (Rd);
12256 reject_bad_reg (Rm);
12257
12258 inst.instruction |= Rd << 8;
12259 inst.instruction |= Rm << 16;
12260 inst.instruction |= Rm;
12261 }
12262
/* For the Armv8.1-M conditional instructions.  */
/* Encodes CSEL/CSINC/CSINV/CSNEG and their aliases (CINC/CINV/CNEG,
   CSET/CSETM).  The aliases are folded onto the base encodings by
   duplicating registers and/or inverting the condition.  */
static void
do_t_cond (void)
{
  unsigned Rd, Rn, Rm;
  signed int cond;

  constraint (inst.cond != COND_ALWAYS, BAD_COND);

  Rd = inst.operands[0].reg;
  switch (inst.instruction)
    {
    case T_MNEM_csinc:
    case T_MNEM_csinv:
    case T_MNEM_csneg:
    case T_MNEM_csel:
      /* Full four-operand form: Rd, Rn, Rm, cond.  */
      Rn = inst.operands[1].reg;
      Rm = inst.operands[2].reg;
      cond = inst.operands[3].imm;
      constraint (Rn == REG_SP, BAD_SP);
      constraint (Rm == REG_SP, BAD_SP);
      break;

    case T_MNEM_cinc:
    case T_MNEM_cinv:
    case T_MNEM_cneg:
      /* Alias with Rn == Rm and the condition inverted.  */
      Rn = inst.operands[1].reg;
      cond = inst.operands[2].imm;
      /* Invert the last bit to invert the cond.  */
      cond = TOGGLE_BIT (cond, 0);
      constraint (Rn == REG_SP, BAD_SP);
      Rm = Rn;
      break;

    case T_MNEM_csetm:
    case T_MNEM_cset:
      /* Alias with both source registers fixed to PC (reads as zero in
	 this encoding) and the condition inverted.  */
      cond = inst.operands[1].imm;
      /* Invert the last bit to invert the cond.  */
      cond = TOGGLE_BIT (cond, 0);
      Rn = REG_PC;
      Rm = REG_PC;
      break;

    default: abort ();
    }

  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction = THUMB_OP32 (inst.instruction);
  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  inst.instruction |= cond << 4;
}
12316
12317 static void
12318 do_t_csdb (void)
12319 {
12320 set_pred_insn_type (OUTSIDE_PRED_INSN);
12321 }
12322
12323 static void
12324 do_t_cps (void)
12325 {
12326 set_pred_insn_type (OUTSIDE_PRED_INSN);
12327 inst.instruction |= inst.operands[0].imm;
12328 }
12329
/* Thumb CPSIE/CPSID.  Uses the 32-bit encoding when a mode operand is
   present or .w was requested (and the architecture allows it),
   otherwise falls back to the 16-bit encoding with additional
   constraints.  */
static void
do_t_cpsi (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* 32-bit form: recover the imod field from the 16-bit template,
	 then rebuild the T32 CPS encoding.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
12357
12358 /* THUMB CPY instruction (argument parse). */
12359
12360 static void
12361 do_t_cpy (void)
12362 {
12363 if (inst.size_req == 4)
12364 {
12365 inst.instruction = THUMB_OP32 (T_MNEM_mov);
12366 inst.instruction |= inst.operands[0].reg << 8;
12367 inst.instruction |= inst.operands[1].reg;
12368 }
12369 else
12370 {
12371 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
12372 inst.instruction |= (inst.operands[0].reg & 0x7);
12373 inst.instruction |= inst.operands[1].reg << 3;
12374 }
12375 }
12376
12377 static void
12378 do_t_cbz (void)
12379 {
12380 set_pred_insn_type (OUTSIDE_PRED_INSN);
12381 constraint (inst.operands[0].reg > 7, BAD_HIREG);
12382 inst.instruction |= inst.operands[0].reg;
12383 inst.relocs[0].pc_rel = 1;
12384 inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH7;
12385 }
12386
12387 static void
12388 do_t_dbg (void)
12389 {
12390 inst.instruction |= inst.operands[0].imm;
12391 }
12392
12393 static void
12394 do_t_div (void)
12395 {
12396 unsigned Rd, Rn, Rm;
12397
12398 Rd = inst.operands[0].reg;
12399 Rn = (inst.operands[1].present
12400 ? inst.operands[1].reg : Rd);
12401 Rm = inst.operands[2].reg;
12402
12403 reject_bad_reg (Rd);
12404 reject_bad_reg (Rn);
12405 reject_bad_reg (Rm);
12406
12407 inst.instruction |= Rd << 8;
12408 inst.instruction |= Rn << 16;
12409 inst.instruction |= Rm;
12410 }
12411
12412 static void
12413 do_t_hint (void)
12414 {
12415 if (unified_syntax && inst.size_req == 4)
12416 inst.instruction = THUMB_OP32 (inst.instruction);
12417 else
12418 inst.instruction = THUMB_OP16 (inst.instruction);
12419 }
12420
/* Thumb IT instruction.  Records the new predication state and, for a
   negative base condition, rewrites the mask so that the stored mask
   bits match the hardware's expected encoding.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_pred_insn_type (IT_INSN);
  /* Mask is the parsed x/y/z pattern plus a guard bit marking the end
     of the block.  */
  now_pred.mask = (inst.instruction & 0xf) | 0x10;
  now_pred.cc = cond;
  now_pred.warn_deprecated = FALSE;
  now_pred.type = SCALAR_PRED;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The position of the lowest set bit tells the block length;
	 flip the bits above it so the then/else pattern encodes
	 correctly for the inverted condition.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_pred.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_pred.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_pred.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_pred.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
12464
/* Helper function used for both push/pop and ldm/stm.  */
/* Encodes a Thumb-2 multiple load/store.  DO_IO is false when the
   caller only wants the register mask handled (no base register);
   BASE is the base register (-1 if none), MASK the register list,
   WRITEBACK whether '!' was given.  Single-register lists are
   re-encoded as plain LDR/STR.  */
static void
encode_thumb2_multi (bfd_boolean do_io, int base, unsigned mask,
		     bfd_boolean writeback)
{
  bfd_boolean load, store;

  gas_assert (base != -1 || !do_io);
  /* Bit 20 of the template distinguishes load from store.  */
  load = do_io && ((inst.instruction & (1 << 20)) != 0);
  store = do_io && !load;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if (do_io && (mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC is a branch: must terminate any IT block.  */
	    set_pred_insn_type_last ();
	}
    }
  else if (store)
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  /* (mask & (mask - 1)) == 0 <=> exactly one register in the list.  */
  if (do_io && ((mask & (mask - 1)) == 0))
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Move the sole register's number into the Rt field (bits 12-15).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  if (do_io)
    inst.instruction |= base << 16;
}
12532
/* Thumb LDM/STM (and LDMIA/STMIA etc.).  In unified syntax tries hard
   to select a 16-bit encoding (plain ldmia/stmia, push/pop via SP, or a
   single-register ldr/str), falling back to the 32-bit form via
   encode_thumb2_multi; in divided syntax only the classic 16-bit
   ldmia/stmia are accepted.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  /* MASK is the bit for the base register in the reglist.  */
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* 16-bit stmia always writes back; 16-bit ldmia writes
		 back exactly when the base is absent from the list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0].reg == REG_SP)
	    {
	      /* SP-based forms map onto push/pop or str/ldr [sp].  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
			            ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
			            ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_multi (TRUE /* do_io */, inst.operands[0].reg,
			       inst.operands[1].imm,
			       inst.operands[0].writeback);
	}
    }
  else
    {
      /* Divided syntax: only 16-bit ldmia/stmia with low registers.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
12661
12662 static void
12663 do_t_ldrex (void)
12664 {
12665 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
12666 || inst.operands[1].postind || inst.operands[1].writeback
12667 || inst.operands[1].immisreg || inst.operands[1].shifted
12668 || inst.operands[1].negative,
12669 BAD_ADDR_MODE);
12670
12671 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
12672
12673 inst.instruction |= inst.operands[0].reg << 12;
12674 inst.instruction |= inst.operands[1].reg << 16;
12675 inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
12676 }
12677
12678 static void
12679 do_t_ldrexd (void)
12680 {
12681 if (!inst.operands[1].present)
12682 {
12683 constraint (inst.operands[0].reg == REG_LR,
12684 _("r14 not allowed as first register "
12685 "when second register is omitted"));
12686 inst.operands[1].reg = inst.operands[0].reg + 1;
12687 }
12688 constraint (inst.operands[0].reg == inst.operands[1].reg,
12689 BAD_OVERLAP);
12690
12691 inst.instruction |= inst.operands[0].reg << 12;
12692 inst.instruction |= inst.operands[1].reg << 8;
12693 inst.instruction |= inst.operands[2].reg << 16;
12694 }
12695
/* Thumb single-register load/store (LDR/STR and the byte/halfword/
   signed variants).  Tries the 16-bit encodings first (register
   offset, small immediate, PC/SP-relative), possibly deferring the
   choice to relaxation, before committing to a 32-bit encoding; in
   divided syntax only the classic 16-bit forms are accepted.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load that writes PC is a branch: close any IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_pred_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* =const pseudo-operand: try mov/literal-pool substitution.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- or SP-relative forms use dedicated opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.relocs[0].pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* Let relaxation widen to 32 bits if the offset needs it.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Divided syntax from here on: 16-bit encodings only.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC/SP-relative: word-sized loads (and SP stores) only.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert immediate-offset opcodes to their register-offset
     counterparts for the [Rn, Rm] form.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
12882
/* Thumb-2 LDRD/STRD.  The second transfer register is optional and
   defaults to Rt + 1; warns about writeback overlapping a transfer
   register.  */
static void
do_t_ldstd (void)
{
  if (!inst.operands[1].present)
    {
      inst.operands[1].reg = inst.operands[0].reg + 1;
      /* Defaulting would produce PC (from LR) or SP (from r12).  */
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed here"));
      constraint (inst.operands[0].reg == REG_R12,
		  _("r12 not allowed here"));
    }

  if (inst.operands[2].writeback
      && (inst.operands[0].reg == inst.operands[2].reg
	  || inst.operands[1].reg == inst.operands[2].reg))
    as_warn (_("base register written back, and overlaps "
	       "one of transfer registers"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
}
12905
12906 static void
12907 do_t_ldstt (void)
12908 {
12909 inst.instruction |= inst.operands[0].reg << 12;
12910 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
12911 }
12912
12913 static void
12914 do_t_mla (void)
12915 {
12916 unsigned Rd, Rn, Rm, Ra;
12917
12918 Rd = inst.operands[0].reg;
12919 Rn = inst.operands[1].reg;
12920 Rm = inst.operands[2].reg;
12921 Ra = inst.operands[3].reg;
12922
12923 reject_bad_reg (Rd);
12924 reject_bad_reg (Rn);
12925 reject_bad_reg (Rm);
12926 reject_bad_reg (Ra);
12927
12928 inst.instruction |= Rd << 8;
12929 inst.instruction |= Rn << 16;
12930 inst.instruction |= Rm;
12931 inst.instruction |= Ra << 12;
12932 }
12933
12934 static void
12935 do_t_mlal (void)
12936 {
12937 unsigned RdLo, RdHi, Rn, Rm;
12938
12939 RdLo = inst.operands[0].reg;
12940 RdHi = inst.operands[1].reg;
12941 Rn = inst.operands[2].reg;
12942 Rm = inst.operands[3].reg;
12943
12944 reject_bad_reg (RdLo);
12945 reject_bad_reg (RdHi);
12946 reject_bad_reg (Rn);
12947 reject_bad_reg (Rm);
12948
12949 inst.instruction |= RdLo << 12;
12950 inst.instruction |= RdHi << 8;
12951 inst.instruction |= Rn << 16;
12952 inst.instruction |= Rm;
12953 }
12954
/* Thumb MOV/MOVS/CMP (register, immediate, or shifted-register forms).
   In unified syntax, carefully selects between the 16-bit and 32-bit
   encodings (including re-encoding register-shifted moves as shift
   instructions and MOVS PC, LR as SUBS PC, LR, #0); in divided syntax
   only the classic 16-bit forms are produced.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* Writing PC ends any IT block.  */
  if (Rn == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      /* mov/movs put Rd in bits 8-11; cmp puts Rn in bits 16-19.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      if (in_pred_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				   "deprecated when r%u is the destination "
				   "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		    constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_pred_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      /* Thumb-1 group relocations must stay on the 16-bit form;
		 anything else is fixed up or relaxed normally.  */
	      if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		  || inst.relocs[0].type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		{
		  if (inst.size_req == 2)
		    inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
		  else
		    inst.relax = opcode;
		}
	    }
	  else
	    {
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_pred_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  /* The narrow shift forms have Rd == Rn.  */
	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_pred_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Divided syntax: 16-bit encodings only from here.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
13254
/* Thumb-2 MOVW/MOVT.  Converts :lower16:/:upper16: relocs to their
   Thumb equivalents (checking they match the mnemonic), and encodes a
   fully-known immediate directly into the split imm16 fields.  */
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 23 of the template distinguishes MOVT from MOVW.  */
  top = (inst.instruction & 0x00800000) != 0;
  if (inst.relocs[0].type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.relocs[0].type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      /* Constant known now: scatter imm16 over the imm4:i:imm3:imm8
	 fields of the T32 encoding.  */
      imm = inst.relocs[0].exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
13287
/* Encode Thumb MVN/MVNS/TST/CMP/CMN.  Chooses between the 16-bit and
   32-bit encodings depending on registers, flags behaviour, and
   whether we are inside an IT/predication block.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN tolerate SP in Rn but never PC; the other mnemonics get
     the standard SP/PC rejection.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* MVN places its register in bits 8-11; the compare/test forms
	 use bits 16-19.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      /* Decide whether a 16-bit encoding is usable.  */
      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	/* Flag-setting form: narrow only outside a predicated block.  */
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Rewrite the data-processing opcode into its immediate form.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Non-unified (pre-UAL) syntax only ever had the 16-bit forms.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
13367
/* Encode Thumb MRS (read a status or special-purpose register into
   Rd).  Handles both banked-register operands and the PSR-flag
   syntax, with M-profile-specific validation.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  /* VFP "mrs" aliases (e.g. FPSCR transfers) are handled elsewhere.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked-register operand (e.g. SP_usr).  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      /* Scatter the banked-register value into SYSm and R fields.  */
      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698: The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
						   "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
13415
/* Encode Thumb MSR (write Rn to a status or special-purpose
   register).  Thumb MSR has no immediate form; the operand flags are
   validated against the selected CPU profile before encoding.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* VFP "msr" aliases are handled elsewhere.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698: The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      /* Without the DSP extension only the flags field (PSR_f) may be
	 written on M-profile.  */
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* Scatter the flag bits into the R, mask and SYSm fields.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
13462
/* Encode Thumb MUL/MULS.  The 16-bit form requires the destination to
   overlap one of the source registers; otherwise the 32-bit Thumb-2
   encoding is used.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* Two-operand form: MUL Rd, Rm means MUL Rd, Rd, Rm.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      /* Narrow form needs low registers and Rd overlapping a source.  */
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	/* MULS is only narrow outside a predicated block.  */
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
13525
13526 static void
13527 do_t_mull (void)
13528 {
13529 unsigned RdLo, RdHi, Rn, Rm;
13530
13531 RdLo = inst.operands[0].reg;
13532 RdHi = inst.operands[1].reg;
13533 Rn = inst.operands[2].reg;
13534 Rm = inst.operands[3].reg;
13535
13536 reject_bad_reg (RdLo);
13537 reject_bad_reg (RdHi);
13538 reject_bad_reg (Rn);
13539 reject_bad_reg (Rm);
13540
13541 inst.instruction |= RdLo << 12;
13542 inst.instruction |= RdHi << 8;
13543 inst.instruction |= Rn << 16;
13544 inst.instruction |= Rm;
13545
13546 if (RdLo == RdHi)
13547 as_tsktsk (_("rdhi and rdlo must be different"));
13548 }
13549
/* Encode Thumb NOP and its hint variants (YIELD, WFE, ...).  Falls
   back to the v6T1 "mov r8, r8" encoding (0x46c0) where Thumb-2 is
   unavailable.  */
static void
do_t_nop (void)
{
  /* NOP is allowed anywhere relative to an IT block.  */
  set_pred_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  /* Wide encoding; hint number goes in the low bits.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    /* "mov r8, r8" — the traditional Thumb-1 NOP.  */
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
13582
/* Encode Thumb NEG/NEGS (alias of RSB{S} Rd, Rn, #0).  Selects the
   16-bit encoding for low registers when permitted by the
   predication state.  */
static void
do_t_neg (void)
{
  if (unified_syntax)
    {
      bfd_boolean narrow;

      /* Flag-setting form is narrow outside an IT block, the
	 non-setting form inside one.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].reg << 16;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
  else
    {
      /* Pre-UAL syntax: only the 16-bit, flag-clearing form exists.  */
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
13623
/* Encode Thumb-2 ORN{S} (OR NOT): Rd = Rn | ~operand2.  Only a
   32-bit encoding exists.  */
static void
do_t_orn (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  /* Two-operand form defaults Rn to Rd.  */
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      /* Rewrite the opcode into its immediate form and let the fixup
	 machinery encode the modified-immediate constant.  */
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      constraint (inst.operands[2].shifted
		  && inst.operands[2].immisreg,
		  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
13657
13658 static void
13659 do_t_pkhbt (void)
13660 {
13661 unsigned Rd, Rn, Rm;
13662
13663 Rd = inst.operands[0].reg;
13664 Rn = inst.operands[1].reg;
13665 Rm = inst.operands[2].reg;
13666
13667 reject_bad_reg (Rd);
13668 reject_bad_reg (Rn);
13669 reject_bad_reg (Rm);
13670
13671 inst.instruction |= Rd << 8;
13672 inst.instruction |= Rn << 16;
13673 inst.instruction |= Rm;
13674 if (inst.operands[3].present)
13675 {
13676 unsigned int val = inst.relocs[0].exp.X_add_number;
13677 constraint (inst.relocs[0].exp.X_op != O_constant,
13678 _("expression too complex"));
13679 inst.instruction |= (val & 0x1c) << 10;
13680 inst.instruction |= (val & 0x03) << 6;
13681 }
13682 }
13683
13684 static void
13685 do_t_pkhtb (void)
13686 {
13687 if (!inst.operands[3].present)
13688 {
13689 unsigned Rtmp;
13690
13691 inst.instruction &= ~0x00000020;
13692
13693 /* PR 10168. Swap the Rm and Rn registers. */
13694 Rtmp = inst.operands[1].reg;
13695 inst.operands[1].reg = inst.operands[2].reg;
13696 inst.operands[2].reg = Rtmp;
13697 }
13698 do_t_pkhbt ();
13699 }
13700
/* Encode Thumb-2 PLD/PLDW/PLI (preload hints).  Only the address
   operand needs encoding; a register index must not be SP or PC.  */
static void
do_t_pld (void)
{
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
13709
/* Encode Thumb PUSH/POP.  Uses the 16-bit encoding when the register
   list fits (low registers, plus LR for push or PC for pop);
   otherwise falls back to the 32-bit LDM/STM form.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    /* Low registers only: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
					? REG_LR : REG_PC)))
    {
      /* Low registers plus exactly LR (push) or PC (pop): 16-bit
	 encoding with the M/P bit set.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else needs the 32-bit multi-register form (base SP,
	 writeback).  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_multi (TRUE /* do_io */, 13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
13742
13743 static void
13744 do_t_clrm (void)
13745 {
13746 if (unified_syntax)
13747 encode_thumb2_multi (FALSE /* do_io */, -1, inst.operands[0].imm, FALSE);
13748 else
13749 {
13750 inst.error = _("invalid register list to push/pop instruction");
13751 return;
13752 }
13753 }
13754
/* Encode Armv8.1-M VSCCLRM (clear a range of FP registers).  The
   start register is split across the encoding's D/Vd fields; the
   size bit distinguishes single from double precision.  */
static void
do_t_vscclrm (void)
{
  if (inst.operands[0].issingle)
    {
      /* S registers: low bit of the register number is the D bit.  */
      inst.instruction |= (inst.operands[0].reg & 0x1) << 22;
      inst.instruction |= (inst.operands[0].reg & 0x1e) << 11;
      inst.instruction |= inst.operands[0].imm;
    }
  else
    {
      /* D registers: top bit of the register number is the D bit;
	 register count is doubled (two words per register).  */
      inst.instruction |= (inst.operands[0].reg & 0x10) << 18;
      inst.instruction |= (inst.operands[0].reg & 0xf) << 12;
      inst.instruction |= 1 << 8;
      inst.instruction |= inst.operands[0].imm << 1;
    }
}
13772
13773 static void
13774 do_t_rbit (void)
13775 {
13776 unsigned Rd, Rm;
13777
13778 Rd = inst.operands[0].reg;
13779 Rm = inst.operands[1].reg;
13780
13781 reject_bad_reg (Rd);
13782 reject_bad_reg (Rm);
13783
13784 inst.instruction |= Rd << 8;
13785 inst.instruction |= Rm << 16;
13786 inst.instruction |= Rm;
13787 }
13788
/* Encode Thumb REV/REV16/REVSH (byte-reversal).  Low registers can
   use the 16-bit encoding; the 32-bit form repeats the source
   register in both Rn and Rm fields.  */
static void
do_t_rev (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      /* Rm is encoded twice in the wide form.  */
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
13817
13818 static void
13819 do_t_rrx (void)
13820 {
13821 unsigned Rd, Rm;
13822
13823 Rd = inst.operands[0].reg;
13824 Rm = inst.operands[1].reg;
13825
13826 reject_bad_reg (Rd);
13827 reject_bad_reg (Rm);
13828
13829 inst.instruction |= Rd << 8;
13830 inst.instruction |= Rm;
13831 }
13832
/* Encode Thumb RSB{S} (reverse subtract).  "RSBS Rd, Rn, #0" can be
   shrunk to the 16-bit NEGS encoding; other immediate forms use the
   32-bit modified-immediate encoding.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit of the wide encoding.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* Only the #0 immediate has a narrow equivalent.  */
      if (inst.relocs[0].exp.X_op != O_constant
	  || inst.relocs[0].exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.relocs[0].type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* Rewrite into the wide immediate form.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
13887
/* Encode Thumb SETEND.  Bit 3 selects big-endian; the instruction is
   deprecated from ARMv8 onwards and must sit outside an IT block.  */
static void
do_t_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
13899
/* Encode the Thumb shift instructions (ASR/LSL/LSR/ROR, immediate or
   register form, with or without flag setting).  Wide immediate
   shifts are emitted as MOV{S} with a shifted operand, since Thumb-2
   has no dedicated shift-immediate opcodes.  */
static void
do_t_shift (void)
{
  /* Two-operand form shifts in place: Rd, Rs means Rd, Rd, Rs.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Decide whether a 16-bit encoding is possible.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit ROR-by-immediate.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* 16-bit register-shift form shifts Rd in place.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 32-bit register-shift encoding.  */
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* 32-bit immediate shift: emit as MOV{S} Rd, Rm, SHIFT #n.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.relocs[0].type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 16-bit register-shift encodings.  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* 16-bit immediate-shift encodings.  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-UAL syntax: 16-bit encodings only.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
14047
14048 static void
14049 do_t_simd (void)
14050 {
14051 unsigned Rd, Rn, Rm;
14052
14053 Rd = inst.operands[0].reg;
14054 Rn = inst.operands[1].reg;
14055 Rm = inst.operands[2].reg;
14056
14057 reject_bad_reg (Rd);
14058 reject_bad_reg (Rn);
14059 reject_bad_reg (Rm);
14060
14061 inst.instruction |= Rd << 8;
14062 inst.instruction |= Rn << 16;
14063 inst.instruction |= Rm;
14064 }
14065
14066 static void
14067 do_t_simd2 (void)
14068 {
14069 unsigned Rd, Rn, Rm;
14070
14071 Rd = inst.operands[0].reg;
14072 Rm = inst.operands[1].reg;
14073 Rn = inst.operands[2].reg;
14074
14075 reject_bad_reg (Rd);
14076 reject_bad_reg (Rn);
14077 reject_bad_reg (Rm);
14078
14079 inst.instruction |= Rd << 8;
14080 inst.instruction |= Rn << 16;
14081 inst.instruction |= Rm;
14082 }
14083
14084 static void
14085 do_t_smc (void)
14086 {
14087 unsigned int value = inst.relocs[0].exp.X_add_number;
14088 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
14089 _("SMC is not permitted on this architecture"));
14090 constraint (inst.relocs[0].exp.X_op != O_constant,
14091 _("expression too complex"));
14092 constraint (value > 0xf, _("immediate too large (bigger than 0xF)"));
14093
14094 inst.relocs[0].type = BFD_RELOC_UNUSED;
14095 inst.instruction |= (value & 0x000f) << 16;
14096
14097 /* PR gas/15623: SMC instructions must be last in an IT block. */
14098 set_pred_insn_type_last ();
14099 }
14100
14101 static void
14102 do_t_hvc (void)
14103 {
14104 unsigned int value = inst.relocs[0].exp.X_add_number;
14105
14106 inst.relocs[0].type = BFD_RELOC_UNUSED;
14107 inst.instruction |= (value & 0x0fff);
14108 inst.instruction |= (value & 0xf000) << 4;
14109 }
14110
/* Common encoder for Thumb-2 SSAT/USAT.  BIAS is 1 for SSAT (whose
   saturation position is encoded as imm - 1) and 0 for USAT.  An
   optional LSL/ASR shift of the source may be present.  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.relocs[0].exp.X_add_number;

      /* The shift is encoded inline; no fixup is needed.  */
      inst.relocs[0].type = BFD_RELOC_UNUSED;

      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Split the shift amount across imm3:imm2.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
14148
/* Encode Thumb-2 SSAT: signed saturate, position encoded biased by 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
14154
14155 static void
14156 do_t_ssat16 (void)
14157 {
14158 unsigned Rd, Rn;
14159
14160 Rd = inst.operands[0].reg;
14161 Rn = inst.operands[2].reg;
14162
14163 reject_bad_reg (Rd);
14164 reject_bad_reg (Rn);
14165
14166 inst.instruction |= Rd << 8;
14167 inst.instruction |= inst.operands[1].imm - 1;
14168 inst.instruction |= Rn << 16;
14169 }
14170
14171 static void
14172 do_t_strex (void)
14173 {
14174 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
14175 || inst.operands[2].postind || inst.operands[2].writeback
14176 || inst.operands[2].immisreg || inst.operands[2].shifted
14177 || inst.operands[2].negative,
14178 BAD_ADDR_MODE);
14179
14180 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
14181
14182 inst.instruction |= inst.operands[0].reg << 8;
14183 inst.instruction |= inst.operands[1].reg << 12;
14184 inst.instruction |= inst.operands[2].reg << 16;
14185 inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
14186 }
14187
14188 static void
14189 do_t_strexd (void)
14190 {
14191 if (!inst.operands[2].present)
14192 inst.operands[2].reg = inst.operands[1].reg + 1;
14193
14194 constraint (inst.operands[0].reg == inst.operands[1].reg
14195 || inst.operands[0].reg == inst.operands[2].reg
14196 || inst.operands[0].reg == inst.operands[3].reg,
14197 BAD_OVERLAP);
14198
14199 inst.instruction |= inst.operands[0].reg;
14200 inst.instruction |= inst.operands[1].reg << 12;
14201 inst.instruction |= inst.operands[2].reg << 8;
14202 inst.instruction |= inst.operands[3].reg << 16;
14203 }
14204
14205 static void
14206 do_t_sxtah (void)
14207 {
14208 unsigned Rd, Rn, Rm;
14209
14210 Rd = inst.operands[0].reg;
14211 Rn = inst.operands[1].reg;
14212 Rm = inst.operands[2].reg;
14213
14214 reject_bad_reg (Rd);
14215 reject_bad_reg (Rn);
14216 reject_bad_reg (Rm);
14217
14218 inst.instruction |= Rd << 8;
14219 inst.instruction |= Rn << 16;
14220 inst.instruction |= Rm;
14221 inst.instruction |= inst.operands[3].imm << 4;
14222 }
14223
/* Encode Thumb SXTH/SXTB/UXTH/UXTB.  The 16-bit form exists only for
   low registers with no rotation; the 32-bit form carries the
   rotation in bits 4-5.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Rotation amount (already divided by 8) in bits 4-5.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
14259
/* Encode Thumb SWI/SVC.  The 8-bit comment field is filled in by the
   fixup machinery via BFD_RELOC_ARM_SWI.  */
static void
do_t_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
}
14265
/* Encode Thumb-2 TBB/TBH (table branch).  Bit 4 of the template
   distinguishes the halfword form (TBH), whose index is written with
   an LSL #1 shift.  Must be the last instruction in an IT block.  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  set_pred_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* ARMv8 relaxed the SP restriction on the base register.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only TBH takes (and requires) the LSL #1 on the index.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
14288
/* Encode Thumb UDF (permanently undefined).  The 16-bit form takes an
   8-bit immediate; the 32-bit form splits its 16-bit immediate across
   the imm4:imm12 fields.  Operand is optional and defaults to 0.  */
static void
do_t_udf (void)
{
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      constraint (inst.size_req == 2,
		  _("immediate value out of range"));
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  /* UDF is permitted anywhere relative to an IT block.  */
  set_pred_insn_type (NEUTRAL_IT_INSN);
}
14311
14312
/* Encode Thumb-2 USAT: unsigned saturate, position encoded unbiased.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
14318
14319 static void
14320 do_t_usat16 (void)
14321 {
14322 unsigned Rd, Rn;
14323
14324 Rd = inst.operands[0].reg;
14325 Rn = inst.operands[2].reg;
14326
14327 reject_bad_reg (Rd);
14328 reject_bad_reg (Rn);
14329
14330 inst.instruction |= Rd << 8;
14331 inst.instruction |= inst.operands[1].imm;
14332 inst.instruction |= Rn << 16;
14333 }
14334
14335 /* Checking the range of the branch offset (VAL) with NBITS bits
14336 and IS_SIGNED signedness. Also checks the LSB to be 0. */
14337 static int
14338 v8_1_branch_value_check (int val, int nbits, int is_signed)
14339 {
14340 gas_assert (nbits > 0 && nbits <= 32);
14341 if (is_signed)
14342 {
14343 int cmp = (1 << (nbits - 1));
14344 if ((val < -cmp) || (val >= cmp) || (val & 0x01))
14345 return FAIL;
14346 }
14347 else
14348 {
14349 if ((val <= 0) || (val >= (1 << nbits)) || (val & 0x1))
14350 return FAIL;
14351 }
14352 return SUCCESS;
14353 }
14354
/* For branches in Armv8.1-M Mainline.  Encodes BF/BFL/BFCSEL/BFX/BFLX
   (branch-future).  Constant operands are packed directly into the
   immA/immB/immC fields; symbolic ones become pc-relative fixups.  */
static void
do_t_branch_future (void)
{
  /* Remember the mnemonic before the template is overwritten.  */
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);
  if (inst.operands[0].hasreloc == 0)
    {
      /* Operand 0 is the branch-point label, a 5-bit even offset.  */
      if (v8_1_branch_value_check (inst.operands[0].imm, 5, FALSE) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      inst.instruction |= ((inst.operands[0].imm & 0x1f) >> 1) << 23;
    }
  else
    {
      inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH5;
      inst.relocs[0].pc_rel = 1;
    }

  switch (insn)
    {
    case T_MNEM_bf:
      if (inst.operands[1].hasreloc == 0)
	{
	  /* 17-bit signed branch target, scattered into immA:immB:immC.  */
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 17, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  int immA = (val & 0x0001f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF17;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfl:
      if (inst.operands[1].hasreloc == 0)
	{
	  /* BFL has a wider, 19-bit target field.  */
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 19, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  int immA = (val & 0x0007f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF19;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfcsel:
      /* Operand 1.  */
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  int immA = (val & 0x00001000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF13;
	  inst.relocs[1].pc_rel = 1;
	}

      /* Operand 2.  The else-branch target must be either 2 or 4
	 bytes past the branch point; 4 sets the T bit.  */
      if (inst.operands[2].hasreloc == 0)
	{
	  constraint ((inst.operands[0].hasreloc != 0), BAD_ARGS);
	  int val2 = inst.operands[2].imm;
	  int val0 = inst.operands[0].imm & 0x1f;
	  int diff = val2 - val0;
	  if (diff == 4)
	    inst.instruction |= 1 << 17;  /* T bit.  */
	  else if (diff != 2)
	    as_bad (_("out of range label-relative fixup value"));
	}
      else
	{
	  constraint ((inst.operands[0].hasreloc == 0), BAD_ARGS);
	  inst.relocs[2].type = BFD_RELOC_THUMB_PCREL_BFCSEL;
	  inst.relocs[2].pc_rel = 1;
	}

      /* Operand 3.  Condition field; the mnemonic itself must be
	 unconditional.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= (inst.operands[3].imm & 0xf) << 18;
      break;

    case T_MNEM_bfx:
    case T_MNEM_bflx:
      /* Register-indirect forms just need Rn.  */
      inst.instruction |= inst.operands[1].reg << 16;
      break;

    default: abort ();
    }
}
14463
14464 /* Helper function for do_t_loloop to handle relocations. */
14465 static void
14466 v8_1_loop_reloc (int is_le)
14467 {
14468 if (inst.relocs[0].exp.X_op == O_constant)
14469 {
14470 int value = inst.relocs[0].exp.X_add_number;
14471 value = (is_le) ? -value : value;
14472
14473 if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
14474 as_bad (BAD_BRANCH_OFF);
14475
14476 int imml, immh;
14477
14478 immh = (value & 0x00000ffc) >> 2;
14479 imml = (value & 0x00000002) >> 1;
14480
14481 inst.instruction |= (imml << 11) | (immh << 1);
14482 }
14483 else
14484 {
14485 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_LOOP12;
14486 inst.relocs[0].pc_rel = 1;
14487 }
14488 }
14489
14490 /* For shifts with four operands in MVE. */
14491 static void
14492 do_mve_scalar_shift1 (void)
14493 {
14494 unsigned int value = inst.operands[2].imm;
14495
14496 inst.instruction |= inst.operands[0].reg << 16;
14497 inst.instruction |= inst.operands[1].reg << 8;
14498
14499 /* Setting the bit for saturation. */
14500 inst.instruction |= ((value == 64) ? 0: 1) << 7;
14501
14502 /* Assuming Rm is already checked not to be 11x1. */
14503 constraint (inst.operands[3].reg == inst.operands[0].reg, BAD_OVERLAP);
14504 constraint (inst.operands[3].reg == inst.operands[1].reg, BAD_OVERLAP);
14505 inst.instruction |= inst.operands[3].reg << 12;
14506 }
14507
14508 /* For shifts in MVE. */
14509 static void
14510 do_mve_scalar_shift (void)
14511 {
14512 if (!inst.operands[2].present)
14513 {
14514 inst.operands[2] = inst.operands[1];
14515 inst.operands[1].reg = 0xf;
14516 }
14517
14518 inst.instruction |= inst.operands[0].reg << 16;
14519 inst.instruction |= inst.operands[1].reg << 8;
14520
14521 if (inst.operands[2].isreg)
14522 {
14523 /* Assuming Rm is already checked not to be 11x1. */
14524 constraint (inst.operands[2].reg == inst.operands[0].reg, BAD_OVERLAP);
14525 constraint (inst.operands[2].reg == inst.operands[1].reg, BAD_OVERLAP);
14526 inst.instruction |= inst.operands[2].reg << 12;
14527 }
14528 else
14529 {
14530 /* Assuming imm is already checked as [1,32]. */
14531 unsigned int value = inst.operands[2].imm;
14532 inst.instruction |= (value & 0x1c) << 10;
14533 inst.instruction |= (value & 0x03) << 6;
14534 /* Change last 4 bits from 0xd to 0xf. */
14535 inst.instruction |= 0x2;
14536 }
14537 }
14538
/* MVE instruction encoder helpers.  Base opcode values for MVE mnemonics
   that are not covered by the generic tables; the do_mve_* encoders OR
   operand fields into these.  */
/* Dot-product / multiply-accumulate family.  */
#define M_MNEM_vabav 0xee800f01
#define M_MNEM_vmladav 0xeef00e00
#define M_MNEM_vmladava 0xeef00e20
#define M_MNEM_vmladavx 0xeef01e00
#define M_MNEM_vmladavax 0xeef01e20
#define M_MNEM_vmlsdav 0xeef00e01
#define M_MNEM_vmlsdava 0xeef00e21
#define M_MNEM_vmlsdavx 0xeef01e01
#define M_MNEM_vmlsdavax 0xeef01e21
#define M_MNEM_vmullt 0xee011e00
#define M_MNEM_vmullb 0xee010e00
#define M_MNEM_vctp 0xf000e801
/* Interleaving structure stores/loads (VST2x/VST4x, VLD2x/VLD4x).  */
#define M_MNEM_vst20 0xfc801e00
#define M_MNEM_vst21 0xfc801e20
#define M_MNEM_vst40 0xfc801e01
#define M_MNEM_vst41 0xfc801e21
#define M_MNEM_vst42 0xfc801e41
#define M_MNEM_vst43 0xfc801e61
#define M_MNEM_vld20 0xfc901e00
#define M_MNEM_vld21 0xfc901e20
#define M_MNEM_vld40 0xfc901e01
#define M_MNEM_vld41 0xfc901e21
#define M_MNEM_vld42 0xfc901e41
#define M_MNEM_vld43 0xfc901e61
/* Element-size-specific vector stores/loads.  */
#define M_MNEM_vstrb 0xec000e00
#define M_MNEM_vstrh 0xec000e10
#define M_MNEM_vstrw 0xec000e40
#define M_MNEM_vstrd 0xec000e50
#define M_MNEM_vldrb 0xec100e00
#define M_MNEM_vldrh 0xec100e10
#define M_MNEM_vldrw 0xec100e40
#define M_MNEM_vldrd 0xec100e50
/* Move / widening / narrowing moves (top and bottom halves).  */
#define M_MNEM_vmovlt 0xeea01f40
#define M_MNEM_vmovlb 0xeea00f40
#define M_MNEM_vmovnt 0xfe311e81
#define M_MNEM_vmovnb 0xfe310e81
#define M_MNEM_vadc 0xee300f00
#define M_MNEM_vadci 0xee301f00
#define M_MNEM_vbrsr 0xfe011e60
/* Across-vector reductions.  */
#define M_MNEM_vaddlv 0xee890f00
#define M_MNEM_vaddlva 0xee890f20
#define M_MNEM_vaddv 0xeef10f00
#define M_MNEM_vaddva 0xeef10f20
/* Incrementing/decrementing (wrapping) duplicate.  */
#define M_MNEM_vddup 0xee011f6e
#define M_MNEM_vdwdup 0xee011f60
#define M_MNEM_vidup 0xee010f6e
#define M_MNEM_viwdup 0xee010f60
#define M_MNEM_vmaxv 0xeee20f00
#define M_MNEM_vmaxav 0xeee00f00
#define M_MNEM_vminv 0xeee20f80
#define M_MNEM_vminav 0xeee00f80
/* Long multiply-accumulate reductions.  */
#define M_MNEM_vmlaldav 0xee800e00
#define M_MNEM_vmlaldava 0xee800e20
#define M_MNEM_vmlaldavx 0xee801e00
#define M_MNEM_vmlaldavax 0xee801e20
#define M_MNEM_vmlsldav 0xee800e01
#define M_MNEM_vmlsldava 0xee800e21
#define M_MNEM_vmlsldavx 0xee801e01
#define M_MNEM_vmlsldavax 0xee801e21
#define M_MNEM_vrmlaldavhx 0xee801f00
#define M_MNEM_vrmlaldavhax 0xee801f20
#define M_MNEM_vrmlsldavh 0xfe800e01
#define M_MNEM_vrmlsldavha 0xfe800e21
#define M_MNEM_vrmlsldavhx 0xfe801e01
#define M_MNEM_vrmlsldavhax 0xfe801e21
/* Saturating/rounding narrowing moves and shifts (top/bottom halves).  */
#define M_MNEM_vqmovnt 0xee331e01
#define M_MNEM_vqmovnb 0xee330e01
#define M_MNEM_vqmovunt 0xee311e81
#define M_MNEM_vqmovunb 0xee310e81
#define M_MNEM_vshrnt 0xee801fc1
#define M_MNEM_vshrnb 0xee800fc1
#define M_MNEM_vrshrnt 0xfe801fc1
#define M_MNEM_vqshrnt 0xee801f40
#define M_MNEM_vqshrnb 0xee800f40
#define M_MNEM_vqshrunt 0xee801fc0
#define M_MNEM_vqshrunb 0xee800fc0
#define M_MNEM_vrshrnb 0xfe800fc1
#define M_MNEM_vqrshrnt 0xee801f41
#define M_MNEM_vqrshrnb 0xee800f41
#define M_MNEM_vqrshrunt 0xfe801fc0
#define M_MNEM_vqrshrunb 0xfe800fc0

/* Bfloat16 instruction encoder helpers (top/bottom fused multiply-add).  */
#define B_MNEM_vfmat 0xfc300850
#define B_MNEM_vfmab 0xfc300810
14625
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of NEON_ENC_TAB: the three alternative base encodings of an
   overloaded Neon mnemonic.  Which member applies is selected by the
   NEON_ENC_* accessor macros defined below.  */
struct neon_tab_entry
{
  unsigned integer;		/* Integer / ARM-register / interleave form.  */
  unsigned float_or_poly;	/* Float or polynomial / lane form.  */
  unsigned scalar_or_imm;	/* Scalar or immediate / dup form.  */
};
14639
/* Map overloaded Neon opcodes to their respective encodings.  Each X()
   entry gives (mnemonic, integer, float_or_poly, scalar_or_imm) encodings;
   N_INV marks a variant that does not exist for that mnemonic.  The same
   list expands once into enum neon_opc and once into neon_enc_tab so the
   two always stay in sync.  */
#define NEON_ENC_TAB \
 X(vabd, 0x0000700, 0x1200d00, N_INV), \
 X(vabdl, 0x0800700, N_INV, N_INV), \
 X(vmax, 0x0000600, 0x0000f00, N_INV), \
 X(vmin, 0x0000610, 0x0200f00, N_INV), \
 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
 X(vadd, 0x0000800, 0x0000d00, N_INV), \
 X(vaddl, 0x0800000, N_INV, N_INV), \
 X(vsub, 0x1000800, 0x0200d00, N_INV), \
 X(vsubl, 0x0800200, N_INV, N_INV), \
 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
 /* Register variants of the following two instructions are encoded as
 vcge / vcgt with the operands reversed. */ \
 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
 X(vfma, N_INV, 0x0000c10, N_INV), \
 X(vfms, N_INV, 0x0200c10, N_INV), \
 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
 X(vmlal, 0x0800800, N_INV, 0x0800240), \
 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
 X(vshl, 0x0000400, N_INV, 0x0800510), \
 X(vqshl, 0x0000410, N_INV, 0x0800710), \
 X(vand, 0x0000110, N_INV, 0x0800030), \
 X(vbic, 0x0100110, N_INV, 0x0800030), \
 X(veor, 0x1000110, N_INV, N_INV), \
 X(vorn, 0x0300110, N_INV, 0x0800010), \
 X(vorr, 0x0200110, N_INV, 0x0800010), \
 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
 X(vst1, 0x0000000, 0x0800000, N_INV), \
 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
 X(vst2, 0x0000100, 0x0800100, N_INV), \
 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
 X(vst3, 0x0000200, 0x0800200, N_INV), \
 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
 X(vst4, 0x0000300, 0x0800300, N_INV), \
 X(vmovn, 0x1b20200, N_INV, N_INV), \
 X(vtrn, 0x1b20080, N_INV, N_INV), \
 X(vqmovn, 0x1b20200, N_INV, N_INV), \
 X(vqmovun, 0x1b20240, N_INV, N_INV), \
 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
 X(vseleq, 0xe000a00, N_INV, N_INV), \
 X(vselvs, 0xe100a00, N_INV, N_INV), \
 X(vselge, 0xe200a00, N_INV, N_INV), \
 X(vselgt, 0xe300a00, N_INV, N_INV), \
 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
 X(aes, 0x3b00300, N_INV, N_INV), \
 X(sha3op, 0x2000c00, N_INV, N_INV), \
 X(sha1h, 0x3b902c0, N_INV, N_INV), \
 X(sha2op, 0x3ba0380, N_INV, N_INV)

/* N_MNEM_* identifiers, one per NEON_ENC_TAB row.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encoding table itself, indexed by enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
14734
/* Do not use these macros; instead, use NEON_ENCODE defined below.  Each
   selects one member of the neon_enc_tab row for the current opcode; the
   low 28 bits of X index the table, while the SINGLE/DOUBLE/FPV8 variants
   also preserve (some of) the high bits of X in the result.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Rewrite inst.instruction with the encoding for the given variant
   (INTEGER, FLOAT, SCALAR, ...) and flag the instruction as Neon.  */
#define NEON_ENCODE(type, inst) \
 do \
 { \
 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
 inst.is_neon = 1; \
 } \
 while (0)
14759
/* Reject a type suffix (e.g. ".s32") that was parsed for an instruction
   which never went through a Neon encoder; returns from the enclosing
   function on error.  */
#define check_neon_suffixes \
 do \
 { \
 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
 { \
 as_bad (_("invalid neon suffix for non neon instruction")); \
 return; \
 } \
 } \
 while (0)
14770
/* Define shapes for instruction operands. The following mnemonic characters
 are used in this table:

 F - VFP S<n> register
 D - Neon D<n> register
 Q - Neon Q<n> register
 I - Immediate
 S - Scalar
 R - ARM register
 L - D<n> register list

 This table is used to generate various data:
 - enumerations of the form NS_DDR to be used as arguments to
 neon_select_shape.
 - a table classifying shapes into single, double, quad, mixed.
 - a table used to drive neon_select_shape. */

/* Each X() entry is (operand count, operand kinds, classification).  */
#define NEON_SHAPE_DEF \
 X(4, (R, R, Q, Q), QUAD), \
 X(4, (Q, R, R, I), QUAD), \
 X(4, (R, R, S, S), QUAD), \
 X(4, (S, S, R, R), QUAD), \
 X(3, (Q, R, I), QUAD), \
 X(3, (I, Q, Q), QUAD), \
 X(3, (I, Q, R), QUAD), \
 X(3, (R, Q, Q), QUAD), \
 X(3, (D, D, D), DOUBLE), \
 X(3, (Q, Q, Q), QUAD), \
 X(3, (D, D, I), DOUBLE), \
 X(3, (Q, Q, I), QUAD), \
 X(3, (D, D, S), DOUBLE), \
 X(3, (Q, Q, S), QUAD), \
 X(3, (Q, Q, R), QUAD), \
 X(3, (R, R, Q), QUAD), \
 X(2, (R, Q), QUAD), \
 X(2, (D, D), DOUBLE), \
 X(2, (Q, Q), QUAD), \
 X(2, (D, S), DOUBLE), \
 X(2, (Q, S), QUAD), \
 X(2, (D, R), DOUBLE), \
 X(2, (Q, R), QUAD), \
 X(2, (D, I), DOUBLE), \
 X(2, (Q, I), QUAD), \
 X(3, (P, F, I), SINGLE), \
 X(3, (P, D, I), DOUBLE), \
 X(3, (P, Q, I), QUAD), \
 X(4, (P, F, F, I), SINGLE), \
 X(4, (P, D, D, I), DOUBLE), \
 X(4, (P, Q, Q, I), QUAD), \
 X(5, (P, F, F, F, I), SINGLE), \
 X(5, (P, D, D, D, I), DOUBLE), \
 X(5, (P, Q, Q, Q, I), QUAD), \
 X(3, (D, L, D), DOUBLE), \
 X(2, (D, Q), MIXED), \
 X(2, (Q, D), MIXED), \
 X(3, (D, Q, I), MIXED), \
 X(3, (Q, D, I), MIXED), \
 X(3, (Q, D, D), MIXED), \
 X(3, (D, Q, Q), MIXED), \
 X(3, (Q, Q, D), MIXED), \
 X(3, (Q, D, S), MIXED), \
 X(3, (D, Q, S), MIXED), \
 X(4, (D, D, D, I), DOUBLE), \
 X(4, (Q, Q, Q, I), QUAD), \
 X(4, (D, D, S, I), DOUBLE), \
 X(4, (Q, Q, S, I), QUAD), \
 X(2, (F, F), SINGLE), \
 X(3, (F, F, F), SINGLE), \
 X(2, (F, I), SINGLE), \
 X(2, (F, D), MIXED), \
 X(2, (D, F), MIXED), \
 X(3, (F, F, I), MIXED), \
 X(4, (R, R, F, F), SINGLE), \
 X(4, (F, F, R, R), SINGLE), \
 X(3, (D, R, R), DOUBLE), \
 X(3, (R, R, D), DOUBLE), \
 X(2, (S, R), SINGLE), \
 X(2, (R, S), SINGLE), \
 X(2, (F, R), SINGLE), \
 X(2, (R, F), SINGLE), \
/* Used for MVE tail predicated loop instructions. */\
 X(2, (R, R), QUAD), \
/* Half float shape supported so far. */\
 X (2, (H, D), MIXED), \
 X (2, (D, H), MIXED), \
 X (2, (H, F), MIXED), \
 X (2, (F, H), MIXED), \
 X (2, (H, H), HALF), \
 X (2, (H, R), HALF), \
 X (2, (R, H), HALF), \
 X (2, (H, I), HALF), \
 X (3, (H, H, H), HALF), \
 X (3, (H, F, I), MIXED), \
 X (3, (F, H, I), MIXED), \
 X (3, (D, H, H), MIXED), \
 X (3, (D, H, S), MIXED)

/* Paste the operand-kind letters into an NS_* identifier,
   e.g. S3 (D, D, D) -> NS_DDD.  */
#define S2(A,B) NS_##A##B
#define S3(A,B,C) NS_##A##B##C
#define S4(A,B,C,D) NS_##A##B##C##D
#define S5(A,B,C,D,E) NS_##A##B##C##D##E

#define X(N, L, C) S##N L

/* NS_* shape identifiers, one per NEON_SHAPE_DEF row, terminated by
   NS_NULL (used as the sentinel for neon_select_shape).  */
enum neon_shape
{
 NEON_SHAPE_DEF,
 NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
#undef S5
14886
/* Overall classification of a shape (third column of NEON_SHAPE_DEF).  */
enum neon_shape_class
{
 SC_HALF,
 SC_SINGLE,
 SC_DOUBLE,
 SC_QUAD,
 SC_MIXED
};

#define X(N, L, C) SC_##C

/* Classification of each shape, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
 NEON_SHAPE_DEF
};

#undef X
14904
/* Kinds of operand element a shape can contain (the letters used in
   NEON_SHAPE_DEF).  */
enum neon_shape_el
{
 SE_H,
 SE_F,
 SE_D,
 SE_Q,
 SE_I,
 SE_S,
 SE_R,
 SE_L,
 SE_P
};

/* Register widths of above.  Indexed by enum neon_shape_el; zero for
   kinds with no fixed register width.  */
static unsigned neon_shape_el_size[] =
{
 16,	/* SE_H.  */
 32,	/* SE_F.  */
 64,	/* SE_D.  */
 128,	/* SE_Q.  */
 0,	/* SE_I.  */
 32,	/* SE_S.  */
 32,	/* SE_R.  */
 0,	/* SE_L.  */
 0	/* SE_P.  */
};
14931
/* Per-shape operand information: number of operands and the kind of
   each, used to drive neon_select_shape.  */
struct neon_shape_info
{
 unsigned els;
 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

/* Expand the operand letters to SE_* initializers.  */
#define S2(A,B) { SE_##A, SE_##B }
#define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
#define S5(A,B,C,D,E) { SE_##A, SE_##B, SE_##C, SE_##D, SE_##E }

#define X(N, L, C) { N, S##N L }

/* Operand info for each shape, indexed by enum neon_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
 NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
#undef S5
14955
/* Bit masks used in type checking given instructions.
 'N_EQK' means the type must be the same as (or based on in some way) the key
 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
 set, various other bits can be set as well in order to modify the meaning of
 the type constraint. */

enum neon_type_mask
{
 N_S8 = 0x0000001,
 N_S16 = 0x0000002,
 N_S32 = 0x0000004,
 N_S64 = 0x0000008,
 N_U8 = 0x0000010,
 N_U16 = 0x0000020,
 N_U32 = 0x0000040,
 N_U64 = 0x0000080,
 N_I8 = 0x0000100,
 N_I16 = 0x0000200,
 N_I32 = 0x0000400,
 N_I64 = 0x0000800,
 N_8 = 0x0001000,
 N_16 = 0x0002000,
 N_32 = 0x0004000,
 N_64 = 0x0008000,
 N_P8 = 0x0010000,
 N_P16 = 0x0020000,
 N_F16 = 0x0040000,
 N_F32 = 0x0080000,
 N_F64 = 0x0100000,
 N_P64 = 0x0200000,
 N_BF16 = 0x0400000,
 N_KEY = 0x1000000, /* Key element (main type specifier). */
 N_EQK = 0x2000000, /* Given operand has the same type & size as the key. */
 N_VFP = 0x4000000, /* VFP mode: operand size must match register width. */
 N_UNT = 0x8000000, /* Must be explicitly untyped. */
 /* The modifier bits below deliberately reuse the low type-bit values;
    they are only meaningful when N_EQK is also set.  */
 N_DBL = 0x0000001, /* If N_EQK, this operand is twice the size. */
 N_HLF = 0x0000002, /* If N_EQK, this operand is half the size. */
 N_SGN = 0x0000004, /* If N_EQK, this operand is forced to be signed. */
 N_UNS = 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
 N_INT = 0x0000010, /* If N_EQK, this operand is forced to be integer. */
 N_FLT = 0x0000020, /* If N_EQK, this operand is forced to be float. */
 N_SIZ = 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
 N_UTYP = 0,
 N_MAX_NONSPECIAL = N_P64
};

#define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Convenience unions of the masks above.  */
#define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32 (N_S8 | N_S16 | N_S32)
#define N_F_16_32 (N_F16 | N_F32)
#define N_SUF_32 (N_SU_32 | N_F_16_32)
#define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL (N_F16 | N_F32 | N_F64)
#define N_I_MVE (N_I8 | N_I16 | N_I32)
#define N_F_MVE (N_F16 | N_F32)
#define N_SU_MVE (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)

/* Pass this as the first type argument to neon_check_type to ignore types
 altogether. */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
15020
/* Select a "shape" for the current instruction (describing register types or
 sizes) from a list of alternatives. Return NS_NULL if the current instruction
 doesn't fit. For non-polymorphic shapes, checking is usually done as a
 function of operand parsing, so this function doesn't need to be called.
 Shapes should be listed in order of decreasing length. */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
 va_list ap;
 enum neon_shape first_shape = shape;

 /* Fix missing optional operands. FIXME: we don't know at this point how
 many arguments we should have, so this makes the assumption that we have
 > 1. This is true of all current Neon opcodes, I think, but may not be
 true in the future. */
 if (!inst.operands[1].present)
 inst.operands[1] = inst.operands[0];

 va_start (ap, shape);

 /* Try each candidate shape in turn; the NS_NULL sentinel ends the list.  */
 for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
 {
 unsigned j;
 int matches = 1;

 /* Check every operand slot of this shape against the parsed operands.  */
 for (j = 0; j < neon_shape_tab[shape].els; j++)
 {
 if (!inst.operands[j].present)
 {
 matches = 0;
 break;
 }

 switch (neon_shape_tab[shape].el[j])
 {
 /* If a .f16, .16, .u16, .s16 type specifier is given over
 a VFP single precision register operand, it's essentially
 means only half of the register is used.

 If the type specifier is given after the mnemonics, the
 information is stored in inst.vectype. If the type specifier
 is given after register operand, the information is stored
 in inst.operands[].vectype.

 When there is only one type specifier, and all the register
 operands are the same type of hardware register, the type
 specifier applies to all register operands.

 If no type specifier is given, the shape is inferred from
 operand information.

 for example:
 vadd.f16 s0, s1, s2: NS_HHH
 vabs.f16 s0, s1: NS_HH
 vmov.f16 s0, r1: NS_HR
 vmov.f16 r0, s1: NS_RH
 vcvt.f16 r0, s1: NS_RH
 vcvt.f16.s32 s2, s2, #29: NS_HFI
 vcvt.f16.s32 s2, s2: NS_HF
 */
 case SE_H:
 if (!(inst.operands[j].isreg
 && inst.operands[j].isvec
 && inst.operands[j].issingle
 && !inst.operands[j].isquad
 && ((inst.vectype.elems == 1
 && inst.vectype.el[0].size == 16)
 || (inst.vectype.elems > 1
 && inst.vectype.el[j].size == 16)
 || (inst.vectype.elems == 0
 && inst.operands[j].vectype.type != NT_invtype
 && inst.operands[j].vectype.size == 16))))
 matches = 0;
 break;

 case SE_F:
 if (!(inst.operands[j].isreg
 && inst.operands[j].isvec
 && inst.operands[j].issingle
 && !inst.operands[j].isquad
 && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
 || (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
 || (inst.vectype.elems == 0
 && (inst.operands[j].vectype.size == 32
 || inst.operands[j].vectype.type == NT_invtype)))))
 matches = 0;
 break;

 case SE_D:
 if (!(inst.operands[j].isreg
 && inst.operands[j].isvec
 && !inst.operands[j].isquad
 && !inst.operands[j].issingle))
 matches = 0;
 break;

 case SE_R:
 if (!(inst.operands[j].isreg
 && !inst.operands[j].isvec))
 matches = 0;
 break;

 case SE_Q:
 if (!(inst.operands[j].isreg
 && inst.operands[j].isvec
 && inst.operands[j].isquad
 && !inst.operands[j].issingle))
 matches = 0;
 break;

 case SE_I:
 if (!(!inst.operands[j].isreg
 && !inst.operands[j].isscalar))
 matches = 0;
 break;

 case SE_S:
 if (!(!inst.operands[j].isreg
 && inst.operands[j].isscalar))
 matches = 0;
 break;

 /* Register lists and the P kind are not checked here.  */
 case SE_P:
 case SE_L:
 break;
 }
 if (!matches)
 break;
 }
 if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
 /* We've matched all the entries in the shape table, and we don't
 have any left over operands which have not been matched. */
 break;
 }

 va_end (ap);

 if (shape == NS_NULL && first_shape != NS_NULL)
 first_error (_("invalid instruction shape"));

 return shape;
}
15164
15165 /* True if SHAPE is predominantly a quadword operation (most of the time, this
15166 means the Q bit should be set). */
15167
15168 static int
15169 neon_quad (enum neon_shape shape)
15170 {
15171 return neon_shape_class[shape] == SC_QUAD;
15172 }
15173
15174 static void
15175 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
15176 unsigned *g_size)
15177 {
15178 /* Allow modification to be made to types which are constrained to be
15179 based on the key element, based on bits set alongside N_EQK. */
15180 if ((typebits & N_EQK) != 0)
15181 {
15182 if ((typebits & N_HLF) != 0)
15183 *g_size /= 2;
15184 else if ((typebits & N_DBL) != 0)
15185 *g_size *= 2;
15186 if ((typebits & N_SGN) != 0)
15187 *g_type = NT_signed;
15188 else if ((typebits & N_UNS) != 0)
15189 *g_type = NT_unsigned;
15190 else if ((typebits & N_INT) != 0)
15191 *g_type = NT_integer;
15192 else if ((typebits & N_FLT) != 0)
15193 *g_type = NT_float;
15194 else if ((typebits & N_SIZ) != 0)
15195 *g_type = NT_untyped;
15196 }
15197 }
15198
15199 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
15200 operand type, i.e. the single type specified in a Neon instruction when it
15201 is the only one given. */
15202
15203 static struct neon_type_el
15204 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
15205 {
15206 struct neon_type_el dest = *key;
15207
15208 gas_assert ((thisarg & N_EQK) != 0);
15209
15210 neon_modify_type_size (thisarg, &dest.type, &dest.size);
15211
15212 return dest;
15213 }
15214
15215 /* Convert Neon type and size into compact bitmask representation. */
15216
15217 static enum neon_type_mask
15218 type_chk_of_el_type (enum neon_el_type type, unsigned size)
15219 {
15220 switch (type)
15221 {
15222 case NT_untyped:
15223 switch (size)
15224 {
15225 case 8: return N_8;
15226 case 16: return N_16;
15227 case 32: return N_32;
15228 case 64: return N_64;
15229 default: ;
15230 }
15231 break;
15232
15233 case NT_integer:
15234 switch (size)
15235 {
15236 case 8: return N_I8;
15237 case 16: return N_I16;
15238 case 32: return N_I32;
15239 case 64: return N_I64;
15240 default: ;
15241 }
15242 break;
15243
15244 case NT_float:
15245 switch (size)
15246 {
15247 case 16: return N_F16;
15248 case 32: return N_F32;
15249 case 64: return N_F64;
15250 default: ;
15251 }
15252 break;
15253
15254 case NT_poly:
15255 switch (size)
15256 {
15257 case 8: return N_P8;
15258 case 16: return N_P16;
15259 case 64: return N_P64;
15260 default: ;
15261 }
15262 break;
15263
15264 case NT_signed:
15265 switch (size)
15266 {
15267 case 8: return N_S8;
15268 case 16: return N_S16;
15269 case 32: return N_S32;
15270 case 64: return N_S64;
15271 default: ;
15272 }
15273 break;
15274
15275 case NT_unsigned:
15276 switch (size)
15277 {
15278 case 8: return N_U8;
15279 case 16: return N_U16;
15280 case 32: return N_U32;
15281 case 64: return N_U64;
15282 default: ;
15283 }
15284 break;
15285
15286 case NT_bfloat:
15287 if (size == 16) return N_BF16;
15288 break;
15289
15290 default: ;
15291 }
15292
15293 return N_UTYP;
15294 }
15295
15296 /* Convert compact Neon bitmask type representation to a type and size. Only
15297 handles the case where a single bit is set in the mask. */
15298
15299 static int
15300 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
15301 enum neon_type_mask mask)
15302 {
15303 if ((mask & N_EQK) != 0)
15304 return FAIL;
15305
15306 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
15307 *size = 8;
15308 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16 | N_BF16))
15309 != 0)
15310 *size = 16;
15311 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
15312 *size = 32;
15313 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
15314 *size = 64;
15315 else
15316 return FAIL;
15317
15318 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
15319 *type = NT_signed;
15320 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
15321 *type = NT_unsigned;
15322 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
15323 *type = NT_integer;
15324 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
15325 *type = NT_untyped;
15326 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
15327 *type = NT_poly;
15328 else if ((mask & (N_F_ALL)) != 0)
15329 *type = NT_float;
15330 else if ((mask & (N_BF16)) != 0)
15331 *type = NT_bfloat;
15332 else
15333 return FAIL;
15334
15335 return SUCCESS;
15336 }
15337
15338 /* Modify a bitmask of allowed types. This is only needed for type
15339 relaxation. */
15340
15341 static unsigned
15342 modify_types_allowed (unsigned allowed, unsigned mods)
15343 {
15344 unsigned size;
15345 enum neon_el_type type;
15346 unsigned destmask;
15347 int i;
15348
15349 destmask = 0;
15350
15351 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
15352 {
15353 if (el_type_of_type_chk (&type, &size,
15354 (enum neon_type_mask) (allowed & i)) == SUCCESS)
15355 {
15356 neon_modify_type_size (mods, &type, &size);
15357 destmask |= type_chk_of_el_type (type, size);
15358 }
15359 }
15360
15361 return destmask;
15362 }
15363
15364 /* Check type and return type classification.
15365 The manual states (paraphrase): If one datatype is given, it indicates the
15366 type given in:
15367 - the second operand, if there is one
15368 - the operand, if there is no second operand
15369 - the result, if there are no operands.
15370 This isn't quite good enough though, so we use a concept of a "key" datatype
15371 which is set on a per-instruction basis, which is the one which matters when
15372 only one data type is written.
15373 Note: this function has side-effects (e.g. filling in missing operands). All
15374 Neon instructions should call it before performing bit encoding. */
15375
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  /* Returned on any error; callers test for NT_invtype.  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      /* Remember which operand carries the N_KEY flag; it is the one
	 whose written type governs the others.  */
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A type in the mnemonic and a per-operand type are mutually exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 records the key operand's type/size (and validates the FP16
     architecture constraint); pass 1 checks every operand against the
     recorded key and the allowed-type masks.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  /* On the second pass, N_EQK operands derive their allowed types
	     from the key operand (possibly modified by N_HLF/N_DBL etc.).  */
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (BAD_SIMD_TYPE);
		      return badtype;
		    }
		}
	      else
		{
		  /* N_EQK operand: must equal the key's type/size after
		     applying this operand's modifier bits.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
15573
15574 /* Neon-style VFP instruction forwarding. */
15575
15576 /* Thumb VFP instructions have 0xE in the condition field. */
15577
15578 static void
15579 do_vfp_cond_or_thumb (void)
15580 {
15581 inst.is_neon = 1;
15582
15583 if (thumb_mode)
15584 inst.instruction |= 0xe0000000;
15585 else
15586 inst.instruction |= inst.cond << 28;
15587 }
15588
15589 /* Look up and encode a simple mnemonic, for use as a helper function for the
15590 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
15591 etc. It is assumed that operand parsing has already been done, and that the
15592 operands are in the form expected by the given opcode (this isn't necessarily
15593 the same as the form in which they were parsed, hence some massaging must
15594 take place before this function is called).
15595 Checks current arch version against that in the looked-up opcode. */
15596
15597 static void
15598 do_vfp_nsyn_opcode (const char *opname)
15599 {
15600 const struct asm_opcode *opcode;
15601
15602 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
15603
15604 if (!opcode)
15605 abort ();
15606
15607 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
15608 thumb_mode ? *opcode->tvariant : *opcode->avariant),
15609 _(BAD_FPU));
15610
15611 inst.is_neon = 1;
15612
15613 if (thumb_mode)
15614 {
15615 inst.instruction = opcode->tvalue;
15616 opcode->tencode ();
15617 }
15618 else
15619 {
15620 inst.instruction = (inst.cond << 28) | opcode->avalue;
15621 opcode->aencode ();
15622 }
15623 }
15624
15625 static void
15626 do_vfp_nsyn_add_sub (enum neon_shape rs)
15627 {
15628 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
15629
15630 if (rs == NS_FFF || rs == NS_HHH)
15631 {
15632 if (is_add)
15633 do_vfp_nsyn_opcode ("fadds");
15634 else
15635 do_vfp_nsyn_opcode ("fsubs");
15636
15637 /* ARMv8.2 fp16 instruction. */
15638 if (rs == NS_HHH)
15639 do_scalar_fp16_v82_encode ();
15640 }
15641 else
15642 {
15643 if (is_add)
15644 do_vfp_nsyn_opcode ("faddd");
15645 else
15646 do_vfp_nsyn_opcode ("fsubd");
15647 }
15648 }
15649
15650 /* Check operand types to see if this is a VFP instruction, and if so call
15651 PFN (). */
15652
15653 static int
15654 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
15655 {
15656 enum neon_shape rs;
15657 struct neon_type_el et;
15658
15659 switch (args)
15660 {
15661 case 2:
15662 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
15663 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
15664 break;
15665
15666 case 3:
15667 rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15668 et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15669 N_F_ALL | N_KEY | N_VFP);
15670 break;
15671
15672 default:
15673 abort ();
15674 }
15675
15676 if (et.type != NT_invtype)
15677 {
15678 pfn (rs);
15679 return SUCCESS;
15680 }
15681
15682 inst.error = NULL;
15683 return FAIL;
15684 }
15685
15686 static void
15687 do_vfp_nsyn_mla_mls (enum neon_shape rs)
15688 {
15689 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
15690
15691 if (rs == NS_FFF || rs == NS_HHH)
15692 {
15693 if (is_mla)
15694 do_vfp_nsyn_opcode ("fmacs");
15695 else
15696 do_vfp_nsyn_opcode ("fnmacs");
15697
15698 /* ARMv8.2 fp16 instruction. */
15699 if (rs == NS_HHH)
15700 do_scalar_fp16_v82_encode ();
15701 }
15702 else
15703 {
15704 if (is_mla)
15705 do_vfp_nsyn_opcode ("fmacd");
15706 else
15707 do_vfp_nsyn_opcode ("fnmacd");
15708 }
15709 }
15710
15711 static void
15712 do_vfp_nsyn_fma_fms (enum neon_shape rs)
15713 {
15714 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
15715
15716 if (rs == NS_FFF || rs == NS_HHH)
15717 {
15718 if (is_fma)
15719 do_vfp_nsyn_opcode ("ffmas");
15720 else
15721 do_vfp_nsyn_opcode ("ffnmas");
15722
15723 /* ARMv8.2 fp16 instruction. */
15724 if (rs == NS_HHH)
15725 do_scalar_fp16_v82_encode ();
15726 }
15727 else
15728 {
15729 if (is_fma)
15730 do_vfp_nsyn_opcode ("ffmad");
15731 else
15732 do_vfp_nsyn_opcode ("ffnmad");
15733 }
15734 }
15735
15736 static void
15737 do_vfp_nsyn_mul (enum neon_shape rs)
15738 {
15739 if (rs == NS_FFF || rs == NS_HHH)
15740 {
15741 do_vfp_nsyn_opcode ("fmuls");
15742
15743 /* ARMv8.2 fp16 instruction. */
15744 if (rs == NS_HHH)
15745 do_scalar_fp16_v82_encode ();
15746 }
15747 else
15748 do_vfp_nsyn_opcode ("fmuld");
15749 }
15750
15751 static void
15752 do_vfp_nsyn_abs_neg (enum neon_shape rs)
15753 {
15754 int is_neg = (inst.instruction & 0x80) != 0;
15755 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
15756
15757 if (rs == NS_FF || rs == NS_HH)
15758 {
15759 if (is_neg)
15760 do_vfp_nsyn_opcode ("fnegs");
15761 else
15762 do_vfp_nsyn_opcode ("fabss");
15763
15764 /* ARMv8.2 fp16 instruction. */
15765 if (rs == NS_HH)
15766 do_scalar_fp16_v82_encode ();
15767 }
15768 else
15769 {
15770 if (is_neg)
15771 do_vfp_nsyn_opcode ("fnegd");
15772 else
15773 do_vfp_nsyn_opcode ("fabsd");
15774 }
15775 }
15776
15777 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
15778 insns belong to Neon, and are handled elsewhere. */
15779
15780 static void
15781 do_vfp_nsyn_ldm_stm (int is_dbmode)
15782 {
15783 int is_ldm = (inst.instruction & (1 << 20)) != 0;
15784 if (is_ldm)
15785 {
15786 if (is_dbmode)
15787 do_vfp_nsyn_opcode ("fldmdbs");
15788 else
15789 do_vfp_nsyn_opcode ("fldmias");
15790 }
15791 else
15792 {
15793 if (is_dbmode)
15794 do_vfp_nsyn_opcode ("fstmdbs");
15795 else
15796 do_vfp_nsyn_opcode ("fstmias");
15797 }
15798 }
15799
15800 static void
15801 do_vfp_nsyn_sqrt (void)
15802 {
15803 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
15804 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
15805
15806 if (rs == NS_FF || rs == NS_HH)
15807 {
15808 do_vfp_nsyn_opcode ("fsqrts");
15809
15810 /* ARMv8.2 fp16 instruction. */
15811 if (rs == NS_HH)
15812 do_scalar_fp16_v82_encode ();
15813 }
15814 else
15815 do_vfp_nsyn_opcode ("fsqrtd");
15816 }
15817
15818 static void
15819 do_vfp_nsyn_div (void)
15820 {
15821 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15822 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15823 N_F_ALL | N_KEY | N_VFP);
15824
15825 if (rs == NS_FFF || rs == NS_HHH)
15826 {
15827 do_vfp_nsyn_opcode ("fdivs");
15828
15829 /* ARMv8.2 fp16 instruction. */
15830 if (rs == NS_HHH)
15831 do_scalar_fp16_v82_encode ();
15832 }
15833 else
15834 do_vfp_nsyn_opcode ("fdivd");
15835 }
15836
15837 static void
15838 do_vfp_nsyn_nmul (void)
15839 {
15840 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15841 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15842 N_F_ALL | N_KEY | N_VFP);
15843
15844 if (rs == NS_FFF || rs == NS_HHH)
15845 {
15846 NEON_ENCODE (SINGLE, inst);
15847 do_vfp_sp_dyadic ();
15848
15849 /* ARMv8.2 fp16 instruction. */
15850 if (rs == NS_HHH)
15851 do_scalar_fp16_v82_encode ();
15852 }
15853 else
15854 {
15855 NEON_ENCODE (DOUBLE, inst);
15856 do_vfp_dp_rd_rn_rm ();
15857 }
15858 do_vfp_cond_or_thumb ();
15859
15860 }
15861
15862 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
15863 (0, 1, 2, 3). */
15864
static unsigned
neon_logbits (unsigned x)
{
  /* Map an element size (8, 16, 32, 64) to the two-bit encoding used in
     Neon/MVE size fields (0, 1, 2, 3): ffs gives the 1-based index of
     the lowest set bit, so subtracting 4 anchors 8 at 0.  */
  return ffs (x) - 4;
}
15870
15871 #define LOW4(R) ((R) & 0xf)
15872 #define HI1(R) (((R) >> 4) & 1)
15873 #define LOW1(R) ((R) & 0x1)
15874 #define HI4(R) (((R) >> 1) & 0xf)
15875
/* Translate the parsed condition code in inst.operands[0].imm into the
   3-bit fc field used by MVE VCMP/VPT, validating it against the element
   type ET (each type class accepts only a subset of conditions).
   Reports an error and returns 0 on an invalid combination.  */
static unsigned
mve_get_vcmp_vpt_cond (struct neon_type_el et)
{
  switch (et.type)
    {
    default:
      first_error (BAD_EL_TYPE);
      return 0;
    case NT_float:
      switch (inst.operands[0].imm)
	{
	default:
	  first_error (_("invalid condition"));
	  return 0;
	case 0x0:
	  /* eq.  */
	  return 0;
	case 0x1:
	  /* ne.  */
	  return 1;
	case 0xa:
	  /* ge.  */
	  return 4;
	case 0xb:
	  /* lt.  */
	  return 5;
	case 0xc:
	  /* gt.  */
	  return 6;
	case 0xd:
	  /* le.  */
	  return 7;
	}
    case NT_integer:
      /* only accept eq and ne.  */
      if (inst.operands[0].imm > 1)
	{
	  first_error (_("invalid condition"));
	  return 0;
	}
      return inst.operands[0].imm;
    case NT_unsigned:
      /* cs (hs) and hi only.  */
      if (inst.operands[0].imm == 0x2)
	return 2;
      else if (inst.operands[0].imm == 0x8)
	return 3;
      else
	{
	  first_error (_("invalid condition"));
	  return 0;
	}
    case NT_signed:
      switch (inst.operands[0].imm)
	{
	default:
	  first_error (_("invalid condition"));
	  return 0;
	case 0xa:
	  /* ge.  */
	  return 4;
	case 0xb:
	  /* lt.  */
	  return 5;
	case 0xc:
	  /* gt.  */
	  return 6;
	case 0xd:
	  /* le.  */
	  return 7;
	}
    }
  /* Should be unreachable.  */
  abort ();
}
15950
15951 /* For VCTP (create vector tail predicate) in MVE. */
15952 static void
15953 do_mve_vctp (void)
15954 {
15955 int dt = 0;
15956 unsigned size = 0x0;
15957
15958 if (inst.cond > COND_ALWAYS)
15959 inst.pred_insn_type = INSIDE_VPT_INSN;
15960 else
15961 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
15962
15963 /* This is a typical MVE instruction which has no type but have size 8, 16,
15964 32 and 64. For instructions with no type, inst.vectype.el[j].type is set
15965 to NT_untyped and size is updated in inst.vectype.el[j].size. */
15966 if ((inst.operands[0].present) && (inst.vectype.el[0].type == NT_untyped))
15967 dt = inst.vectype.el[0].size;
15968
15969 /* Setting this does not indicate an actual NEON instruction, but only
15970 indicates that the mnemonic accepts neon-style type suffixes. */
15971 inst.is_neon = 1;
15972
15973 switch (dt)
15974 {
15975 case 8:
15976 break;
15977 case 16:
15978 size = 0x1; break;
15979 case 32:
15980 size = 0x2; break;
15981 case 64:
15982 size = 0x3; break;
15983 default:
15984 first_error (_("Type is not allowed for this instruction"));
15985 }
15986 inst.instruction |= size << 20;
15987 inst.instruction |= inst.operands[0].reg << 16;
15988 }
15989
/* Encode an MVE VPT instruction, opening a vector-predicated block.
   When no operands are given, only the predication bookkeeping below
   the big `if' applies.  */
static void
do_mve_vpt (void)
{
  /* We are dealing with a vector predicated block.  */
  if (inst.operands[0].present)
    {
      enum neon_shape rs = neon_select_shape (NS_IQQ, NS_IQR, NS_NULL);
      struct neon_type_el et
	= neon_check_type (3, rs, N_EQK, N_KEY | N_F_MVE | N_I_MVE | N_SU_32,
			   N_EQK);

      /* Condition code -> 3-bit fc field (scattered into bits below).  */
      unsigned fcond = mve_get_vcmp_vpt_cond (et);

      constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

      if (et.type == NT_invtype)
	return;

      if (et.type == NT_float)
	{
	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
		      BAD_FPU);
	  constraint (et.size != 16 && et.size != 32, BAD_EL_TYPE);
	  inst.instruction |= (et.size == 16) << 28;
	  inst.instruction |= 0x3 << 20;
	}
      else
	{
	  constraint (et.size != 8 && et.size != 16 && et.size != 32,
		      BAD_EL_TYPE);
	  inst.instruction |= 1 << 28;
	  inst.instruction |= neon_logbits (et.size) << 20;
	}

      /* Second compare operand: either a Q register or a GPR.  */
      if (inst.operands[2].isquad)
	{
	  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
	  inst.instruction |= LOW4 (inst.operands[2].reg);
	  inst.instruction |= (fcond & 0x2) >> 1;
	}
      else
	{
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  inst.instruction |= 1 << 6;
	  inst.instruction |= (fcond & 0x2) << 4;
	  inst.instruction |= inst.operands[2].reg;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= (fcond & 0x4) << 10;
      inst.instruction |= (fcond & 0x1) << 7;

    }
  set_pred_insn_type (VPT_INSN);
  now_pred.cc = 0;
  /* Build the predication mask from the encoded mask bits: bit 22 and
     bits 15..13 of the instruction.  */
  now_pred.mask = ((inst.instruction & 0x00400000) >> 19)
    | ((inst.instruction & 0xe000) >> 13);
  now_pred.warn_deprecated = FALSE;
  now_pred.type = VECTOR_PRED;
  inst.is_neon = 1;
}
16051
/* Encode an MVE VCMP instruction.  Note that inst.instruction is
   overwritten with the base opcode part-way through, so the early
   tests against N_MNEM_* must come first.  */
static void
do_mve_vcmp (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
  if (!inst.operands[1].isreg || !inst.operands[1].isquad)
    first_error (_(reg_expected_msgs[REG_TYPE_MQ]));
  if (!inst.operands[2].present)
    first_error (_("MVE vector or ARM register expected"));
  constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

  /* Deal with 'else' conditional MVE's vcmp, it will be parsed as vcmpe.  */
  if ((inst.instruction & 0xffffffff) == N_MNEM_vcmpe
      && inst.operands[1].isquad)
    {
      inst.instruction = N_MNEM_vcmp;
      inst.cond = 0x10;
    }

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  enum neon_shape rs = neon_select_shape (NS_IQQ, NS_IQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_KEY | N_F_MVE | N_I_MVE | N_SU_32,
		       N_EQK);

  /* PC is only allowed as the ZR pseudo-register in the GPR form.  */
  constraint (rs == NS_IQR && inst.operands[2].reg == REG_PC
	      && !inst.operands[2].iszr, BAD_PC);

  /* Condition code -> 3-bit fc field, scattered into the encoding.  */
  unsigned fcond = mve_get_vcmp_vpt_cond (et);

  inst.instruction = 0xee010f00;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= (fcond & 0x4) << 10;
  inst.instruction |= (fcond & 0x1) << 7;
  if (et.type == NT_float)
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
		  BAD_FPU);
      inst.instruction |= (et.size == 16) << 28;
      inst.instruction |= 0x3 << 20;
    }
  else
    {
      inst.instruction |= 1 << 28;
      inst.instruction |= neon_logbits (et.size) << 20;
    }
  /* Second compare operand: either a Q register or a GPR.  */
  if (inst.operands[2].isquad)
    {
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      inst.instruction |= (fcond & 0x2) >> 1;
      inst.instruction |= LOW4 (inst.operands[2].reg);
    }
  else
    {
      if (inst.operands[2].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);
      inst.instruction |= 1 << 6;
      inst.instruction |= (fcond & 0x2) << 4;
      inst.instruction |= inst.operands[2].reg;
    }

  inst.is_neon = 1;
  return;
}
16119
16120 static void
16121 do_mve_vmaxa_vmina (void)
16122 {
16123 if (inst.cond > COND_ALWAYS)
16124 inst.pred_insn_type = INSIDE_VPT_INSN;
16125 else
16126 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16127
16128 enum neon_shape rs = neon_select_shape (NS_QQ, NS_NULL);
16129 struct neon_type_el et
16130 = neon_check_type (2, rs, N_EQK, N_KEY | N_S8 | N_S16 | N_S32);
16131
16132 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16133 inst.instruction |= neon_logbits (et.size) << 18;
16134 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16135 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16136 inst.instruction |= LOW4 (inst.operands[1].reg);
16137 inst.is_neon = 1;
16138 }
16139
16140 static void
16141 do_mve_vfmas (void)
16142 {
16143 enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
16144 struct neon_type_el et
16145 = neon_check_type (3, rs, N_F_MVE | N_KEY, N_EQK, N_EQK);
16146
16147 if (inst.cond > COND_ALWAYS)
16148 inst.pred_insn_type = INSIDE_VPT_INSN;
16149 else
16150 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16151
16152 if (inst.operands[2].reg == REG_SP)
16153 as_tsktsk (MVE_BAD_SP);
16154 else if (inst.operands[2].reg == REG_PC)
16155 as_tsktsk (MVE_BAD_PC);
16156
16157 inst.instruction |= (et.size == 16) << 28;
16158 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16159 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16160 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16161 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16162 inst.instruction |= inst.operands[2].reg;
16163 inst.is_neon = 1;
16164 }
16165
/* Encode MVE VIDUP/VDDUP and the wrapping variants VIWDUP/VDWDUP
   (vector increment/decrement and duplicate).  */
static void
do_mve_viddup (void)
{
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  unsigned imm = inst.relocs[0].exp.X_add_number;
  constraint (imm != 1 && imm != 2 && imm != 4 && imm != 8,
	      _("immediate must be either 1, 2, 4 or 8"));

  enum neon_shape rs;
  struct neon_type_el et;
  unsigned Rm;
  if (inst.instruction == M_MNEM_vddup || inst.instruction == M_MNEM_vidup)
    {
      /* Non-wrapping form: Rm is encoded as the fixed pattern 0b111.  */
      rs = neon_select_shape (NS_QRI, NS_NULL);
      et = neon_check_type (2, rs, N_KEY | N_U8 | N_U16 | N_U32, N_EQK);
      Rm = 7;
    }
  else
    {
      /* Wrapping form: the wrap limit lives in an odd-numbered GPR,
	 encoded as its upper bits (reg >> 1).  */
      constraint ((inst.operands[2].reg % 2) != 1, BAD_EVEN);
      if (inst.operands[2].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);
      else if (inst.operands[2].reg == REG_PC)
	first_error (BAD_PC);

      rs = neon_select_shape (NS_QRRI, NS_NULL);
      et = neon_check_type (3, rs, N_KEY | N_U8 | N_U16 | N_U32, N_EQK, N_EQK);
      Rm = inst.operands[2].reg >> 1;
    }
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* The step immediate {1,2,4,8} is encoded in two bits: bit 7 selects
     {4,8} and bit 0 selects {2,8}.  */
  inst.instruction |= (imm > 2) << 7;
  inst.instruction |= Rm << 1;
  inst.instruction |= (imm == 2 || imm == 8);
  inst.is_neon = 1;
}
16208
16209 static void
16210 do_mve_vmlas (void)
16211 {
16212 enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
16213 struct neon_type_el et
16214 = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
16215
16216 if (inst.operands[2].reg == REG_PC)
16217 as_tsktsk (MVE_BAD_PC);
16218 else if (inst.operands[2].reg == REG_SP)
16219 as_tsktsk (MVE_BAD_SP);
16220
16221 if (inst.cond > COND_ALWAYS)
16222 inst.pred_insn_type = INSIDE_VPT_INSN;
16223 else
16224 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16225
16226 inst.instruction |= (et.type == NT_unsigned) << 28;
16227 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16228 inst.instruction |= neon_logbits (et.size) << 20;
16229 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16230 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16231 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16232 inst.instruction |= inst.operands[2].reg;
16233 inst.is_neon = 1;
16234 }
16235
/* Encode MVE VSHLL (vector shift left long).  A shift amount equal to
   the element size uses a separate encoding from smaller shifts.  */
static void
do_mve_vshll (void)
{
  struct neon_type_el et
    = neon_check_type (2, NS_QQI, N_EQK, N_S8 | N_U8 | N_S16 | N_U16 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate value out of range"));

  if ((unsigned)imm == et.size)
    {
      /* Shift by the full element width: dedicated encoding with the
	 size in bits 19..18.  */
      inst.instruction |= neon_logbits (et.size) << 18;
      inst.instruction |= 0x110001;
    }
    else
    {
      /* Shift less than the element width: size+imm in bits 21..16.  */
      inst.instruction |= (et.size + imm) << 16;
      inst.instruction |= 0x800140;
    }

  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.is_neon = 1;
}
16269
16270 static void
16271 do_mve_vshlc (void)
16272 {
16273 if (inst.cond > COND_ALWAYS)
16274 inst.pred_insn_type = INSIDE_VPT_INSN;
16275 else
16276 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16277
16278 if (inst.operands[1].reg == REG_PC)
16279 as_tsktsk (MVE_BAD_PC);
16280 else if (inst.operands[1].reg == REG_SP)
16281 as_tsktsk (MVE_BAD_SP);
16282
16283 int imm = inst.operands[2].imm;
16284 constraint (imm < 1 || imm > 32, _("immediate value out of range"));
16285
16286 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16287 inst.instruction |= (imm & 0x1f) << 16;
16288 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16289 inst.instruction |= inst.operands[1].reg;
16290 inst.is_neon = 1;
16291 }
16292
/* Encode the MVE narrowing right-shift family (VSHRN, VRSHRN, VQSHRN,
   VQRSHRN, VQSHRUN, VQRSHRUN).  The mnemonic determines which element
   types are legal.  */
static void
do_mve_vshrn (void)
{
  unsigned types;
  switch (inst.instruction)
    {
    case M_MNEM_vshrnt:
    case M_MNEM_vshrnb:
    case M_MNEM_vrshrnt:
    case M_MNEM_vrshrnb:
      types = N_I16 | N_I32;
      break;
    case M_MNEM_vqshrnt:
    case M_MNEM_vqshrnb:
    case M_MNEM_vqrshrnt:
    case M_MNEM_vqrshrnb:
      types = N_U16 | N_U32 | N_S16 | N_S32;
      break;
    case M_MNEM_vqshrunt:
    case M_MNEM_vqshrunb:
    case M_MNEM_vqrshrunt:
    case M_MNEM_vqrshrunb:
      types = N_S16 | N_S32;
      break;
    default:
      abort ();
    }

  struct neon_type_el et = neon_check_type (2, NS_QQI, N_EQK, types | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  unsigned Qd = inst.operands[0].reg;
  unsigned Qm = inst.operands[1].reg;
  unsigned imm = inst.operands[2].imm;
  /* Shift must be between 1 and half the (source) element size.  */
  constraint (imm < 1 || ((unsigned) imm) > (et.size / 2),
	      et.size == 16
	      ? _("immediate operand expected in the range [1,8]")
	      : _("immediate operand expected in the range [1,16]"));

  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= HI1 (Qd) << 22;
  /* The shift amount is encoded as size minus imm in bits 21..16.  */
  inst.instruction |= (et.size - imm) << 16;
  inst.instruction |= LOW4 (Qd) << 12;
  inst.instruction |= HI1 (Qm) << 5;
  inst.instruction |= LOW4 (Qm);
  inst.is_neon = 1;
}
16344
16345 static void
16346 do_mve_vqmovn (void)
16347 {
16348 struct neon_type_el et;
16349 if (inst.instruction == M_MNEM_vqmovnt
16350 || inst.instruction == M_MNEM_vqmovnb)
16351 et = neon_check_type (2, NS_QQ, N_EQK,
16352 N_U16 | N_U32 | N_S16 | N_S32 | N_KEY);
16353 else
16354 et = neon_check_type (2, NS_QQ, N_EQK, N_S16 | N_S32 | N_KEY);
16355
16356 if (inst.cond > COND_ALWAYS)
16357 inst.pred_insn_type = INSIDE_VPT_INSN;
16358 else
16359 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16360
16361 inst.instruction |= (et.type == NT_unsigned) << 28;
16362 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16363 inst.instruction |= (et.size == 32) << 18;
16364 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16365 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16366 inst.instruction |= LOW4 (inst.operands[1].reg);
16367 inst.is_neon = 1;
16368 }
16369
16370 static void
16371 do_mve_vpsel (void)
16372 {
16373 neon_select_shape (NS_QQQ, NS_NULL);
16374
16375 if (inst.cond > COND_ALWAYS)
16376 inst.pred_insn_type = INSIDE_VPT_INSN;
16377 else
16378 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16379
16380 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16381 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16382 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16383 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16384 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16385 inst.instruction |= LOW4 (inst.operands[2].reg);
16386 inst.is_neon = 1;
16387 }
16388
16389 static void
16390 do_mve_vpnot (void)
16391 {
16392 if (inst.cond > COND_ALWAYS)
16393 inst.pred_insn_type = INSIDE_VPT_INSN;
16394 else
16395 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16396 }
16397
16398 static void
16399 do_mve_vmaxnma_vminnma (void)
16400 {
16401 enum neon_shape rs = neon_select_shape (NS_QQ, NS_NULL);
16402 struct neon_type_el et
16403 = neon_check_type (2, rs, N_EQK, N_F_MVE | N_KEY);
16404
16405 if (inst.cond > COND_ALWAYS)
16406 inst.pred_insn_type = INSIDE_VPT_INSN;
16407 else
16408 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16409
16410 inst.instruction |= (et.size == 16) << 28;
16411 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16412 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16413 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16414 inst.instruction |= LOW4 (inst.operands[1].reg);
16415 inst.is_neon = 1;
16416 }
16417
/* Encode MVE VCMUL (complex multiply with rotation 0/90/180/270).  */
static void
do_mve_vcmul (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQI, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_F_MVE | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));

  /* For 32-bit elements the destination must not overlap a source.  */
  if (et.size == 32 && (inst.operands[0].reg == inst.operands[1].reg
			|| inst.operands[0].reg == inst.operands[2].reg))
    as_tsktsk (BAD_MVE_SRCDEST);

  inst.instruction |= (et.size == 32) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* The rotation is split over two bits: bit 12 set for 180/270 and
     bit 0 set for 90/270.  */
  inst.instruction |= (rot > 90) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= (rot == 90 || rot == 270);
  inst.is_neon = 1;
}
16449
16450 /* To handle the Low Overhead Loop instructions
16451 in Armv8.1-M Mainline and MVE. */
static void
do_t_loloop (void)
{
  /* Remember the mnemonic before THUMB_OP32 rewrites inst.instruction.  */
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);

  /* LCTP takes no operands and needs no further encoding.  */
  if (insn == T_MNEM_lctp)
    return;

  set_pred_insn_type (MVE_OUTSIDE_PRED_INSN);

  /* The tail-predicated forms carry a size suffix in bits 21..20.  */
  if (insn == T_MNEM_wlstp || insn == T_MNEM_dlstp)
    {
      struct neon_type_el et
	= neon_check_type (2, NS_RR, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
      inst.instruction |= neon_logbits (et.size) << 20;
      inst.is_neon = 1;
    }

  switch (insn)
    {
    case T_MNEM_letp:
      constraint (!inst.operands[0].present,
		  _("expected LR"));
      /* fall through.  */
    case T_MNEM_le:
      /* le <label>.  */
      if (!inst.operands[0].present)
	inst.instruction |= 1 << 21;

      v8_1_loop_reloc (TRUE);
      break;

    case T_MNEM_wls:
    case T_MNEM_wlstp:
      v8_1_loop_reloc (FALSE);
      /* fall through.  */
    case T_MNEM_dlstp:
    case T_MNEM_dls:
      constraint (inst.operands[1].isreg != 1, BAD_ARGS);

      /* PC is a hard error for the tail-predicated forms, only a
	 warning for plain dls/wls; SP always just warns.  */
      if (insn == T_MNEM_wlstp || insn == T_MNEM_dlstp)
	constraint (inst.operands[1].reg == REG_PC, BAD_PC);
      else if (inst.operands[1].reg == REG_PC)
	as_tsktsk (MVE_BAD_PC);
      if (inst.operands[1].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);

      inst.instruction |= (inst.operands[1].reg << 16);
      break;

    default:
      abort ();
    }
}
16508
16509
/* Encode a Neon-syntax vcmp/vcmpe.  A non-register first operand means
   this is actually an MVE vcmp; a missing second register selects the
   compare-with-zero VFP forms.  */
static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (!inst.operands[0].isreg)
    {
      do_mve_vcmp ();
      return;
    }
  else
    {
      constraint (inst.operands[2].present, BAD_SYNTAX);
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd),
		  BAD_FPU);
    }

  if (inst.operands[1].isreg)
    {
      /* Register-register compare.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare with zero: rewrite the mnemonic to the vcmpz/vcmpez
	 variants before encoding.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
16576
16577 static void
16578 nsyn_insert_sp (void)
16579 {
16580 inst.operands[1] = inst.operands[0];
16581 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
16582 inst.operands[0].reg = REG_SP;
16583 inst.operands[0].isreg = 1;
16584 inst.operands[0].writeback = 1;
16585 inst.operands[0].present = 1;
16586 }
16587
16588 /* Fix up Neon data-processing instructions, ORing in the correct bits for
16589 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
16590
16591 static void
16592 neon_dp_fixup (struct arm_it* insn)
16593 {
16594 unsigned int i = insn->instruction;
16595 insn->is_neon = 1;
16596
16597 if (thumb_mode)
16598 {
16599 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
16600 if (i & (1 << 24))
16601 i |= 1 << 28;
16602
16603 i &= ~(1 << 24);
16604
16605 i |= 0xef000000;
16606 }
16607 else
16608 i |= 0xf2000000;
16609
16610 insn->instruction = i;
16611 }
16612
/* Encode an MVE vector-by-scalar (QQR) instruction.  The incoming
   Neon-style opcode in inst.instruction is matched against the known
   encodings and replaced with the corresponding MVE scalar form, then
   the register, size and U fields are filled in.  SIZE is the element
   size in bits, U the unsigned bit, FP non-zero for the
   floating-point variants.  */
static void
mve_encode_qqr (int size, int U, int fp)
{
  if (inst.operands[2].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[2].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  if (fp)
    {
      /* vadd.  */
      if (((unsigned)inst.instruction) == 0xd00)
	inst.instruction = 0xee300f40;
      /* vsub.  */
      else if (((unsigned)inst.instruction) == 0x200d00)
	inst.instruction = 0xee301f40;
      /* vmul.  */
      else if (((unsigned)inst.instruction) == 0x1000d10)
	inst.instruction = 0xee310e60;

      /* Setting size which is 1 for F16 and 0 for F32.  */
      inst.instruction |= (size == 16) << 28;
    }
  else
    {
      /* vadd.  */
      if (((unsigned)inst.instruction) == 0x800)
	inst.instruction = 0xee010f40;
      /* vsub.  */
      else if (((unsigned)inst.instruction) == 0x1000800)
	inst.instruction = 0xee011f40;
      /* vhadd.  */
      else if (((unsigned)inst.instruction) == 0)
	inst.instruction = 0xee000f40;
      /* vhsub.  */
      else if (((unsigned)inst.instruction) == 0x200)
	inst.instruction = 0xee001f40;
      /* vmla.  */
      else if (((unsigned)inst.instruction) == 0x900)
	inst.instruction = 0xee010e40;
      /* vmul.  */
      else if (((unsigned)inst.instruction) == 0x910)
	inst.instruction = 0xee011e60;
      /* vqadd.  */
      else if (((unsigned)inst.instruction) == 0x10)
	inst.instruction = 0xee000f60;
      /* vqsub.  */
      else if (((unsigned)inst.instruction) == 0x210)
	inst.instruction = 0xee001f60;
      /* vqrdmlah.  */
      else if (((unsigned)inst.instruction) == 0x3000b10)
	inst.instruction = 0xee000e40;
      /* vqdmulh.  */
      else if (((unsigned)inst.instruction) == 0x0000b00)
	inst.instruction = 0xee010e60;
      /* vqrdmulh.  */
      else if (((unsigned)inst.instruction) == 0x1000b00)
	inst.instruction = 0xfe010e60;

      /* Set U-bit.  */
      inst.instruction |= U << 28;

      /* Setting bits for size.  */
      inst.instruction |= neon_logbits (size) << 20;
    }
  /* Qd, Qn and the scalar Rm operand.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= inst.operands[2].reg;
  inst.is_neon = 1;
}
16685
16686 static void
16687 mve_encode_rqq (unsigned bit28, unsigned size)
16688 {
16689 inst.instruction |= bit28 << 28;
16690 inst.instruction |= neon_logbits (size) << 20;
16691 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16692 inst.instruction |= inst.operands[0].reg << 12;
16693 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16694 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16695 inst.instruction |= LOW4 (inst.operands[2].reg);
16696 inst.is_neon = 1;
16697 }
16698
16699 static void
16700 mve_encode_qqq (int ubit, int size)
16701 {
16702
16703 inst.instruction |= (ubit != 0) << 28;
16704 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16705 inst.instruction |= neon_logbits (size) << 20;
16706 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16707 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16708 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16709 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16710 inst.instruction |= LOW4 (inst.operands[2].reg);
16711
16712 inst.is_neon = 1;
16713 }
16714
16715 static void
16716 mve_encode_rq (unsigned bit28, unsigned size)
16717 {
16718 inst.instruction |= bit28 << 28;
16719 inst.instruction |= neon_logbits (size) << 18;
16720 inst.instruction |= inst.operands[0].reg << 12;
16721 inst.instruction |= LOW4 (inst.operands[1].reg);
16722 inst.is_neon = 1;
16723 }
16724
16725 static void
16726 mve_encode_rrqq (unsigned U, unsigned size)
16727 {
16728 constraint (inst.operands[3].reg > 14, MVE_BAD_QREG);
16729
16730 inst.instruction |= U << 28;
16731 inst.instruction |= (inst.operands[1].reg >> 1) << 20;
16732 inst.instruction |= LOW4 (inst.operands[2].reg) << 16;
16733 inst.instruction |= (size == 32) << 16;
16734 inst.instruction |= inst.operands[0].reg << 12;
16735 inst.instruction |= HI1 (inst.operands[2].reg) << 7;
16736 inst.instruction |= inst.operands[3].reg;
16737 inst.is_neon = 1;
16738 }
16739
16740 /* Helper function for neon_three_same handling the operands. */
16741 static void
16742 neon_three_args (int isquad)
16743 {
16744 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16745 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16746 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16747 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16748 inst.instruction |= LOW4 (inst.operands[2].reg);
16749 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16750 inst.instruction |= (isquad != 0) << 6;
16751 inst.is_neon = 1;
16752 }
16753
16754 /* Encode insns with bit pattern:
16755
16756 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16757 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
16758
16759 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
16760 different meaning for some instruction. */
16761
16762 static void
16763 neon_three_same (int isquad, int ubit, int size)
16764 {
16765 neon_three_args (isquad);
16766 inst.instruction |= (ubit != 0) << 24;
16767 if (size != -1)
16768 inst.instruction |= neon_logbits (size) << 20;
16769
16770 neon_dp_fixup (&inst);
16771 }
16772
16773 /* Encode instructions of the form:
16774
16775 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
16776 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
16777
16778 Don't write size if SIZE == -1. */
16779
16780 static void
16781 neon_two_same (int qbit, int ubit, int size)
16782 {
16783 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16784 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16785 inst.instruction |= LOW4 (inst.operands[1].reg);
16786 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16787 inst.instruction |= (qbit != 0) << 6;
16788 inst.instruction |= (ubit != 0) << 24;
16789
16790 if (size != -1)
16791 inst.instruction |= neon_logbits (size) << 18;
16792
16793 neon_dp_fixup (&inst);
16794 }
16795
/* Checks to perform when an instruction turns out to be Neon rather
   than VFP; combined as a bitmask and passed to vfp_or_neon_is_neon.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Check/fix up the condition code field.  */
  NEON_CHECK_ARCH = 2,	/* Require the Neon v1 feature.  */
  NEON_CHECK_ARCH8 = 4	/* Require the ARMv8 Neon feature.  */
};
16802
/* Call this function if an instruction which may have belonged to the VFP or
   Neon instruction sets, but turned out to be a Neon instruction (due to the
   operand types involved, etc.).  We have to check and/or fix-up a couple of
   things:

     - Make sure the user hasn't attempted to make a Neon instruction
       conditional.
     - Alter the value in the condition code field if necessary.
     - Make sure that the arch supports Neon instructions.

   Which of these operations take place depends on bits from enum
   vfp_or_neon_is_neon_bits.

   WARNING: This function has side effects!  If NEON_CHECK_CC is used and the
   current instruction's condition is COND_ALWAYS, the condition field is
   changed to inst.uncond_value.  This is necessary because instructions shared
   between VFP and Neon may be conditional for the VFP variants only, and the
   unconditional Neon version must have, e.g., 0xF in the condition field.  */

static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      /* See the WARNING above: force the unconditional encoding.  */
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }


  if (((check & NEON_CHECK_ARCH) && !mark_feature_used (&fpu_neon_ext_v1))
      || ((check & NEON_CHECK_ARCH8)
	  && !mark_feature_used (&fpu_neon_ext_armv8)))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
16848
16849
/* Return TRUE if the SIMD instruction is available for the current
   cpu_variant.  FP is set to TRUE if this is a SIMD floating-point
   instruction.  CHECK contains the set of bits to pass to
   vfp_or_neon_is_neon for the NEON specific checks.  */

static bfd_boolean
check_simd_pred_availability (int fp, unsigned check)
{
  if (inst.cond > COND_ALWAYS)
    {
      /* Inside a VPT block: only valid on MVE targets.  */
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  inst.error = BAD_FPU;
	  return FALSE;
	}
      inst.pred_insn_type = INSIDE_VPT_INSN;
    }
  else if (inst.cond < COND_ALWAYS)
    {
      /* Conditional execution: fine for MVE, otherwise defer to the
	 Neon checks (conditions are legal in Thumb IT blocks).  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
      else if (vfp_or_neon_is_neon (check) == FAIL)
	return FALSE;
    }
  else
    {
      /* Unconditional: require either the relevant MVE feature or a
	 successful Neon architecture check.  */
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, fp ? mve_fp_ext : mve_ext)
	  && vfp_or_neon_is_neon (check) == FAIL)
	return FALSE;

      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
    }
  return TRUE;
}
16885
16886 /* Neon instruction encoders, in approximate order of appearance. */
16887
16888 static void
16889 do_neon_dyadic_i_su (void)
16890 {
16891 if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
16892 return;
16893
16894 enum neon_shape rs;
16895 struct neon_type_el et;
16896 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16897 rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
16898 else
16899 rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
16900
16901 et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY);
16902
16903
16904 if (rs != NS_QQR)
16905 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
16906 else
16907 mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
16908 }
16909
16910 static void
16911 do_neon_dyadic_i64_su (void)
16912 {
16913 if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
16914 return;
16915 enum neon_shape rs;
16916 struct neon_type_el et;
16917 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16918 {
16919 rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
16920 et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
16921 }
16922 else
16923 {
16924 rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
16925 et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_ALL | N_KEY);
16926 }
16927 if (rs == NS_QQR)
16928 mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
16929 else
16930 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
16931 }
16932
/* Encode a Neon immediate-shift instruction.  ET is the checked
   element type and IMMBITS the (pre-biased) immediate placed at
   bit 16.  The element size is derived from ET.SIZE / 8: its bit 3
   becomes the L bit (bit 7) and its low three bits land at bit 19.
   When WRITE_UBIT is set, UVAL supplies the U bit (bit 24).  */
static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  /* SIZE is 1, 2, 4 or 8 here; split it across the L bit and the
     bits above the immediate field.  */
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
16951
/* Encode VSHL: the immediate-shift form, the MVE vector-by-scalar
   form (vshl Qda, Qda, Rm), or the three-register form (which has Dn
   and Dm swapped relative to most three-register operations).  */
static void
do_neon_shl (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  if (!inst.operands[2].isreg)
    {
      /* Immediate-shift form.  */
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_MVE);
	}
      else
	{
	  rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
	}
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_MVE | N_KEY, N_EQK | N_EQK);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
	}


      if (rs == NS_QQR)
	{
	  /* MVE vector-by-scalar shift.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		       _("invalid instruction shape"));
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  inst.instruction = 0xee311e60;
	  inst.instruction |= (et.type == NT_unsigned) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= neon_logbits (et.size) << 18;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	}
      else
	{
	  unsigned int tmp;

	  /* VSHL/VQSHL 3-register variants have syntax such as:
	       vshl.xx Dd, Dm, Dn
	     whereas other 3-register operations encoded by neon_three_same have
	     syntax like:
	       vadd.xx Dd, Dn, Dm
	     (i.e. with Dn & Dm reversed).  Swap operands[1].reg and
	     operands[2].reg here.  */
	  tmp = inst.operands[2].reg;
	  inst.operands[2].reg = inst.operands[1].reg;
	  inst.operands[1].reg = tmp;
	  NEON_ENCODE (INTEGER, inst);
	  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
	}
    }
}
17031
/* Encode VQSHL: the immediate-shift form, the MVE vector-by-scalar
   form (vqshl Qda, Qda, Rm), or the three-register form (operands
   swapped, see do_neon_shl).  */
static void
do_neon_qshl (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  if (!inst.operands[2].isreg)
    {
      /* Immediate-shift form.  */
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_SU_MVE);
	}
      else
	{
	  rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
	}
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs;
      struct neon_type_el et;

      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_MVE | N_KEY, N_EQK | N_EQK);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
	}

      if (rs == NS_QQR)
	{
	  /* MVE vector-by-scalar shift.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		       _("invalid instruction shape"));
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  inst.instruction = 0xee311ee0;
	  inst.instruction |= (et.type == NT_unsigned) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= neon_logbits (et.size) << 18;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	}
      else
	{
	  unsigned int tmp;

	  /* See note in do_neon_shl.  */
	  tmp = inst.operands[2].reg;
	  inst.operands[2].reg = inst.operands[1].reg;
	  inst.operands[1].reg = tmp;
	  NEON_ENCODE (INTEGER, inst);
	  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
	}
    }
}
17105
/* Encode VRSHL/VQRSHL: either the MVE vector-by-scalar form or the
   three-register form (operands swapped, see do_neon_shl).  */
static void
do_neon_rshl (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_ALL | N_KEY);
    }

  unsigned int tmp;

  if (rs == NS_QQR)
    {
      if (inst.operands[2].reg == REG_PC)
	as_tsktsk (MVE_BAD_PC);
      else if (inst.operands[2].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);

      constraint (inst.operands[0].reg != inst.operands[1].reg,
		  _("invalid instruction shape"));

      if (inst.instruction == 0x0000510)
	/* We are dealing with vqrshl.  */
	inst.instruction = 0xee331ee0;
      else
	/* We are dealing with vrshl.  */
	inst.instruction = 0xee331e60;

      inst.instruction |= (et.type == NT_unsigned) << 28;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= neon_logbits (et.size) << 18;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= inst.operands[2].reg;
      inst.is_neon = 1;
    }
  else
    {
      /* Swap Dn and Dm as for VSHL (see note in do_neon_shl).  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
17159
/* Work out the "cmode" field for a Neon logic immediate (VBIC/VORR)
   of element size SIZE, storing the 8-bit payload through IMMBITS.
   Returns the cmode value, or FAIL (after reporting an error) when
   IMMEDIATE cannot be encoded.  */
static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics?  There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* Try each byte position of the 32-bit value.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Fall back to a 16-bit encoding if both halves are equal.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

 bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
17215
/* Encode VAND/VBIC/VORR/VORN/VEOR.  The three-register form goes
   through neon_three_same; the immediate forms compute a
   cmode/immediate pair, inverting the immediate for the VAND/VORN
   pseudo-instructions so they can be encoded as VBIC/VORR.  */
static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      /* Three-register form.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      if (rs == NS_QQQ
	  && !check_simd_pred_availability (FALSE,
					    NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
      else if (rs != NS_QQQ
	       && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	first_error (BAD_FPU);

      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      /* Because neon_select_shape makes the second operand a copy of the first
	 if the second operand is not present.  */
      if (rs == NS_QQI
	  && !check_simd_pred_availability (FALSE,
					    NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
      else if (rs != NS_QQI
	       && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	first_error (BAD_FPU);

      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	et = neon_check_type (2, rs, N_I32 | N_I16 | N_KEY, N_EQK);
      else
	et = neon_check_type (2, rs, N_I8 | N_I16 | N_I32 | N_I64 | N_F32
			      | N_KEY, N_EQK);

      if (et.type == NT_invtype)
	return;
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;


      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
17324
17325 static void
17326 do_neon_bitfield (void)
17327 {
17328 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
17329 neon_check_type (3, rs, N_IGNORE_TYPE);
17330 neon_three_same (neon_quad (rs), 0, -1);
17331 }
17332
17333 static void
17334 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
17335 unsigned destbits)
17336 {
17337 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
17338 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
17339 types | N_KEY);
17340 if (et.type == NT_float)
17341 {
17342 NEON_ENCODE (FLOAT, inst);
17343 if (rs == NS_QQR)
17344 mve_encode_qqr (et.size, 0, 1);
17345 else
17346 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
17347 }
17348 else
17349 {
17350 NEON_ENCODE (INTEGER, inst);
17351 if (rs == NS_QQR)
17352 mve_encode_qqr (et.size, et.type == ubit_meaning, 0);
17353 else
17354 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
17355 }
17356 }
17357
17358
17359 static void
17360 do_neon_dyadic_if_su_d (void)
17361 {
17362 /* This version only allow D registers, but that constraint is enforced during
17363 operand parsing so we don't need to do anything extra here. */
17364 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
17365 }
17366
17367 static void
17368 do_neon_dyadic_if_i_d (void)
17369 {
17370 /* The "untyped" case can't happen. Do this to stop the "U" bit being
17371 affected if we specify unsigned args. */
17372 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
17373 }
17374
/* Encode an MVE VLDR/VSTR with a [Qn, #imm]{!} addressing mode.  SIZE
   is the access size in bits, ELSIZE the element size from the type
   suffix, LOAD non-zero for VLDR.  */
static void
do_mve_vstr_vldr_QI (int size, int elsize, int load)
{
  constraint (size < 32, BAD_ADDR_MODE);
  constraint (size != elsize, BAD_EL_TYPE);
  constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
  constraint (!inst.operands[1].preind, BAD_ADDR_MODE);
  constraint (load && inst.operands[0].reg == inst.operands[1].reg,
	      _("destination register and offset register may not be the"
		" same"));

  /* The offset is encoded as a scaled 7-bit value plus an add/subtract
     bit.  */
  int imm = inst.relocs[0].exp.X_add_number;
  int add = 1;
  if (imm < 0)
    {
      add = 0;
      imm = -imm;
    }
  constraint ((imm % (size / 8) != 0)
	      || imm > (0x7f << neon_logbits (size)),
	      (size == 32) ? _("immediate must be a multiple of 4 in the"
			       " range of +/-[0,508]")
			   : _("immediate must be a multiple of 8 in the"
			       " range of +/-[0,1016]"));
  inst.instruction |= 0x11 << 24;
  inst.instruction |= add << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= 1 << 12;
  inst.instruction |= (size == 64) << 8;
  /* Clear the low byte before inserting the scaled immediate.  */
  inst.instruction &= 0xffffff00;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= imm >> neon_logbits (size);
}
17411
/* Encode an MVE VLDR/VSTR with a [Rn, Qm{, UXTW #os}] addressing mode
   (scalar base plus vector offset).  SIZE is the access size in bits,
   ELSIZE the element size from the type suffix, LOAD non-zero for
   VLDR.  */
static void
do_mve_vstr_vldr_RQ (int size, int elsize, int load)
{
  /* The offset-register operand packs the optional shift amount into
     the bits above the register number.  */
  unsigned os = inst.operands[1].imm >> 5;
  unsigned type = inst.vectype.el[0].type;
  constraint (os != 0 && size == 8,
	      _("can not shift offsets when accessing less than half-word"));
  constraint (os && os != neon_logbits (size),
	      _("shift immediate must be 1, 2 or 3 for half-word, word"
		" or double-word accesses respectively"));
  if (inst.operands[1].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  /* Check the element type suffix against the access size.  */
  switch (size)
    {
    case 8:
      constraint (elsize >= 64, BAD_EL_TYPE);
      break;
    case 16:
      constraint (elsize < 16 || elsize >= 64, BAD_EL_TYPE);
      break;
    case 32:
    case 64:
      constraint (elsize != size, BAD_EL_TYPE);
      break;
    default:
      break;
    }
  constraint (inst.operands[1].writeback || !inst.operands[1].preind,
	      BAD_ADDR_MODE);
  if (load)
    {
      constraint (inst.operands[0].reg == (inst.operands[1].imm & 0x1f),
		  _("destination register and offset register may not be"
		    " the same"));
      /* Narrowing loads must be explicitly signed or unsigned; a
	 full-width load must not carry a signed suffix.  */
      constraint (size == elsize && type == NT_signed, BAD_EL_TYPE);
      constraint (size != elsize && type != NT_unsigned && type != NT_signed,
		  BAD_EL_TYPE);
      inst.instruction |= ((size == elsize) || (type == NT_unsigned)) << 28;
    }
  else
    {
      constraint (type != NT_untyped, BAD_EL_TYPE);
    }

  inst.instruction |= 1 << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= neon_logbits (elsize) << 7;
  inst.instruction |= HI1 (inst.operands[1].imm) << 5;
  inst.instruction |= LOW4 (inst.operands[1].imm);
  inst.instruction |= !!os;
}
17466
/* Encode an MVE VLDR/VSTR with a [Rn, #imm]{!} or [Rn], #imm
   addressing mode.  SIZE is the access size in bits, ELSIZE the
   element size from the type suffix, LOAD non-zero for VLDR.  */
static void
do_mve_vstr_vldr_RI (int size, int elsize, int load)
{
  enum neon_el_type type = inst.vectype.el[0].type;

  constraint (size >= 64, BAD_ADDR_MODE);
  /* Check the element type suffix against the access size.  */
  switch (size)
    {
    case 16:
      constraint (elsize < 16 || elsize >= 64, BAD_EL_TYPE);
      break;
    case 32:
      constraint (elsize != size, BAD_EL_TYPE);
      break;
    default:
      break;
    }
  if (load)
    {
      constraint (elsize != size && type != NT_unsigned
		  && type != NT_signed, BAD_EL_TYPE);
    }
  else
    {
      constraint (elsize != size && type != NT_untyped, BAD_EL_TYPE);
    }

  /* The offset is encoded as a scaled 7-bit value plus an add/subtract
     bit.  */
  int imm = inst.relocs[0].exp.X_add_number;
  int add = 1;
  if (imm < 0)
    {
      add = 0;
      imm = -imm;
    }

  if ((imm % (size / 8) != 0) || imm > (0x7f << neon_logbits (size)))
    {
      switch (size)
	{
	case 8:
	  constraint (1, _("immediate must be in the range of +/-[0,127]"));
	  break;
	case 16:
	  constraint (1, _("immediate must be a multiple of 2 in the"
			   " range of +/-[0,254]"));
	  break;
	case 32:
	  constraint (1, _("immediate must be a multiple of 4 in the"
			   " range of +/-[0,508]"));
	  break;
	}
    }

  if (size != elsize)
    {
      /* Widening/narrowing form: restricted register ranges.  */
      constraint (inst.operands[1].reg > 7, BAD_HIREG);
      constraint (inst.operands[0].reg > 14,
		  _("MVE vector register in the range [Q0..Q7] expected"));
      inst.instruction |= (load && type == NT_unsigned) << 28;
      inst.instruction |= (size == 16) << 19;
      inst.instruction |= neon_logbits (elsize) << 7;
    }
  else
    {
      if (inst.operands[1].reg == REG_PC)
	as_tsktsk (MVE_BAD_PC);
      else if (inst.operands[1].reg == REG_SP && inst.operands[1].writeback)
	as_tsktsk (MVE_BAD_SP);
      inst.instruction |= 1 << 12;
      inst.instruction |= neon_logbits (size) << 7;
    }
  inst.instruction |= inst.operands[1].preind << 24;
  inst.instruction |= add << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* Clear the low 7 bits before inserting the scaled immediate.  */
  inst.instruction &= 0xffffff80;
  inst.instruction |= imm >> neon_logbits (size);

}
17548
/* Top-level handler for the MVE VSTR/VLDR vector load/store mnemonics.
   Derives the memory access size and load/store direction from the
   mnemonic, then dispatches on the addressing mode of operand 1:
     [Qn, #imm]{!}	      -> do_mve_vstr_vldr_QI
     [Rn, Qm {, UXTW #os}]    -> do_mve_vstr_vldr_RQ
     [Rn {, #imm}]{!}	      -> do_mve_vstr_vldr_RI  */

static void
do_mve_vstr_vldr (void)
{
  unsigned size;
  int load = 0;

  /* Predicated inside a VPT block, plain MVE instruction otherwise.  */
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The mnemonic suffix fixes the access size in bits; the vldr forms
     additionally mark this as a load.  */
  switch (inst.instruction)
    {
    default:
      gas_assert (0);
      break;
    case M_MNEM_vldrb:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrb:
      size = 8;
      break;
    case M_MNEM_vldrh:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrh:
      size = 16;
      break;
    case M_MNEM_vldrw:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrw:
      size = 32;
      break;
    case M_MNEM_vldrd:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrd:
      size = 64;
      break;
    }
  unsigned elsize = inst.vectype.el[0].size;

  if (inst.operands[1].isquad)
    {
      /* We are dealing with [Q, imm]{!} cases.  */
      do_mve_vstr_vldr_QI (size, elsize, load);
    }
  else
    {
      if (inst.operands[1].immisreg == 2)
	{
	  /* We are dealing with [R, Q, {UXTW #os}] cases.  */
	  do_mve_vstr_vldr_RQ (size, elsize, load);
	}
      else if (!inst.operands[1].immisreg)
	{
	  /* We are dealing with [R, Imm]{!}/[R], Imm cases.  */
	  do_mve_vstr_vldr_RI (size, elsize, load);
	}
      else
	constraint (1, BAD_ADDR_MODE);
    }

  inst.is_neon = 1;
}
17615
/* Handler for MVE structured vector load/store mnemonics (VLDn/VSTn family).
   Only a plain [Rn]{!} addressing form is accepted: no offset expression,
   no symbol, and no register offset.  */

static void
do_mve_vst_vld (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  constraint (!inst.operands[1].preind || inst.relocs[0].exp.X_add_symbol != 0
	      || inst.relocs[0].exp.X_add_number != 0
	      || inst.operands[1].immisreg != 0,
	      BAD_ADDR_MODE);
  constraint (inst.vectype.el[0].size > 32, BAD_EL_TYPE);
  if (inst.operands[1].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);
  else if (inst.operands[1].reg == REG_SP && inst.operands[1].writeback)
    as_tsktsk (MVE_BAD_SP);


  /* These instructions are one of the "exceptions" mentioned in
     handle_pred_state.  They are MVE instructions that are not VPT compatible
     and do not accept a VPT code, thus appending such a code is a syntax
     error.  */
  if (inst.cond > COND_ALWAYS)
    first_error (BAD_SYNTAX);
  /* If we append a scalar condition code we can set this to
     MVE_OUTSIDE_PRED_INSN as it will also lead to a syntax error.  */
  else if (inst.cond < COND_ALWAYS)
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
  else
    inst.pred_insn_type = MVE_UNPREDICABLE_INSN;

  /* Assemble the fixed fields: Qd (split into HI1/LOW4), writeback flag,
     base register Rn and the log2 of the element size.  */
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= neon_logbits (inst.vectype.el[0].size) << 7;
  inst.is_neon = 1;
}
17653
/* Handler for MVE VADDLV{A}: long add across vector, result in a pair of
   general registers (shape RRQ), S32/U32 element types only.  */

static void
do_mve_vaddlv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S32 | N_U32 | N_KEY);

  if (et.type == NT_invtype)
    first_error (BAD_EL_TYPE);

  /* Predicated inside a VPT block, plain MVE instruction otherwise.  */
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

  /* U bit selects the unsigned variant.  */
  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= inst.operands[1].reg << 19;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg;
  inst.is_neon = 1;
}
17677
/* Shared handler for three-register instructions accepting signed, unsigned
   and float types up to 32 bits (VMAX, VMIN, ...).  Float variants of
   VMAX/VMIN additionally require the NEON v1 FP extension.  */

static void
do_neon_dyadic_if_su (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
					    N_SUF_32 | N_KEY);

  constraint ((inst.instruction == ((unsigned) N_MNEM_vmax)
	       || inst.instruction == ((unsigned) N_MNEM_vmin))
	      && et.type == NT_float
	      && !ARM_CPU_HAS_FEATURE (cpu_variant,fpu_neon_ext_v1), BAD_FPU);

  if (!check_simd_pred_availability (et.type == NT_float,
				     NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
17696
/* Handler for VADD/VSUB style instructions taking integer or float types:
   first try the VFP form, then fall back to a NEON/MVE encoding, deciding
   between the two architectures from the register shape and predication.  */

static void
do_neon_addsub_if_i (void)
{
  if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
      && try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK,
					    N_EQK, N_IF_32 | N_I64 | N_KEY);

  /* MVE has no 64-bit element Q-with-scalar form.  */
  constraint (rs == NS_QQR && et.size == 64, BAD_FPU);
  /* If we are parsing Q registers and the element types match MVE, which NEON
     also supports, then we must check whether this is an instruction that can
     be used by both MVE/NEON.  This distinction can be made based on whether
     they are predicated or not.  */
  if ((rs == NS_QQQ || rs == NS_QQR) && et.size != 64)
    {
      if (!check_simd_pred_availability (et.type == NT_float,
					 NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
    }
  else
    {
      /* If they are either in a D register or are using an unsupported.  */
      if (rs != NS_QQR
	  && vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
    }

  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
17731
17732 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
17733 result to be:
17734 V<op> A,B (A is operand 0, B is operand 2)
17735 to mean:
17736 V<op> A,B,A
17737 not:
17738 V<op> A,B,B
17739 so handle that case specially. */
17740
17741 static void
17742 neon_exchange_operands (void)
17743 {
17744 if (inst.operands[1].present)
17745 {
17746 void *scratch = xmalloc (sizeof (inst.operands[0]));
17747
17748 /* Swap operands[1] and operands[2]. */
17749 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
17750 inst.operands[1] = inst.operands[2];
17751 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
17752 free (scratch);
17753 }
17754 else
17755 {
17756 inst.operands[1] = inst.operands[2];
17757 inst.operands[2] = inst.operands[0];
17758 }
17759 }
17760
/* Shared encoder for vector comparisons.  REGTYPES are the element types
   allowed for the register-register form, IMMTYPES those for the
   compare-with-#0 form.  INVERT swaps the two source operands so that e.g.
   VCLT can be encoded as VCGT with reversed sources.  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Compare against #0: encoded directly here rather than via
	 neon_dyadic_misc.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
17788
17789 static void
17790 do_neon_cmp (void)
17791 {
17792 neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
17793 }
17794
17795 static void
17796 do_neon_cmp_inv (void)
17797 {
17798 neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
17799 }
17800
17801 static void
17802 do_neon_ceq (void)
17803 {
17804 neon_compare (N_IF_32, N_IF_32, FALSE);
17805 }
17806
17807 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
17808 scalars, which are encoded in 5 bits, M : Rm.
17809 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
17810 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
17811 index in M.
17812
17813 Dot Product instructions are similar to multiply instructions except elsize
17814 should always be 32.
17815
17816 This function translates SCALAR, which is GAS's internal encoding of indexed
17817 scalar register, to raw encoding. There is also register and index range
17818 check based on ELSIZE. */
17819
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      /* 16-bit scalar: register in Rm[2:0], index in M:Rm[3], so the
	 register must be D0-D7 and the index 0-3.  */
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      /* 32-bit scalar: register in Rm[3:0], index in M, so the register
	 must be D0-D15 and the index 0 or 1.  */
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  /* Dummy value after reporting the error; caller ignores it.  */
  return 0;
}
17845
17846 /* Encode multiply / multiply-accumulate scalar instructions. */
17847
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Translate the GAS scalar encoding of operand 2 into the raw M:Rm
     encoding, with range checking against the element size.  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
17870
/* Handler for VMLA/VMLS and friends.  Tries the VFP form first, then picks
   between the NEON scalar form, the MVE Q,Q,R form and the plain
   three-register vector form based on operand 2.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form: NEON only.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else if (!inst.operands[2].isvec)
    {
      /* General-register operand 2: the MVE Q,Q,R form.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);

      enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);

      neon_dyadic_misc (NT_unsigned, N_SU_MVE, 0);
    }
  else
    {
      /* Three-register vector form: NEON only.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
17906
/* Handler for the BFloat16 VFMAB/VFMAT instructions.  Requires Armv8 NEON
   and the BF16 extension.  The indexed-scalar form splits the 2-bit index
   across two instruction fields.  */

static void
do_bfloat_vfma (void)
{
  constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
  constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
  enum neon_shape rs;
  int t_bit = 0;

  /* Anything that is not VFMAB is encoded as VFMAT (T variant bit set).  */
  if (inst.instruction != B_MNEM_vfmab)
    {
      t_bit = 1;
      inst.instruction = B_MNEM_vfmat;
    }

  if (inst.operands[2].isscalar)
    {
      rs = neon_select_shape (NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);

      inst.instruction |= (1 << 25);
      /* Operand 2 packs register and index; peel off the 4-bit index
	 field before validating each part.  */
      int index = inst.operands[2].reg & 0xf;
      constraint (!(index < 4), _("index must be in the range 0 to 3"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 8),
		  _("indexed register must be less than 8"));
      neon_three_args (t_bit);
      /* The two index bits live in separate instruction fields.  */
      inst.instruction |= ((index & 1) << 3);
      inst.instruction |= ((index & 2) << 4);
    }
  else
    {
      rs = neon_select_shape (NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
      neon_three_args (t_bit);
    }

}
17944
/* Handler for VFMA/VFMS.  Tries the VFP form first; with the MVE FP
   extension the Q,Q,R variant is encoded directly here, otherwise the
   generic NEON three-register path is used.  */

static void
do_neon_fmac (void)
{
  if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_fma)
      && try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (!check_simd_pred_availability (TRUE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
    {
      enum neon_shape rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs, N_F_MVE | N_KEY, N_EQK,
						N_EQK);

      if (rs == NS_QQR)
	{
	  /* MVE Q,Q,R form: base opcode fixed here, size bit selects F16.
	     SP/PC as the scalar operand are only UNPREDICTABLE, so warn
	     rather than error.  */
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  inst.instruction = 0xee310e40;
	  inst.instruction |= (et.size == 16) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[1].reg) << 6;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	  return;
	}
    }
  else
    {
      constraint (!inst.operands[2].isvec, BAD_FPU);
    }

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
17987
/* Disambiguate VFMA between the MVE/NEON form and the BFloat16 VFMAB/VFMAT
   form: without the BF16 extension and with no condition suffix it is the
   ordinary fused multiply-accumulate, otherwise the BF16 variant.  */

static void
do_mve_vfma (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_bf16) &&
      inst.cond == COND_ALWAYS)
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      /* Re-dispatch through the generic VFMA handler with the predication
	 state already fixed up.  */
      inst.instruction = N_MNEM_vfma;
      inst.pred_insn_type = INSIDE_VPT_INSN;
      inst.cond = 0xf;
      return do_neon_fmac();
    }
  else
    {
      do_bfloat_vfma();
    }
}
18005
18006 static void
18007 do_neon_tst (void)
18008 {
18009 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
18010 struct neon_type_el et = neon_check_type (3, rs,
18011 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
18012 neon_three_same (neon_quad (rs), 0, et.size);
18013 }
18014
18015 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
18016 same types as the MAC equivalents. The polynomial type for this instruction
18017 is encoded the same as the integer type. */
18018
static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form is NEON only; it shares the MAC encoder.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      do_neon_mac_maybe_scalar ();
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  /* MVE: integer types always allowed, float only with the MVE FP
	     extension.  */
	  enum neon_shape rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
	  struct neon_type_el et
	    = neon_check_type (3, rs, N_EQK, N_EQK, N_I_MVE | N_F_MVE | N_KEY);
	  if (et.type == NT_float)
	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
			BAD_FPU);

	  neon_dyadic_misc (NT_float, N_I_MVE | N_F_MVE, 0);
	}
      else
	{
	  /* NEON three-register form, which also allows the P8 polynomial
	     type (see the comment above this function).  */
	  constraint (!inst.operands[2].isvec, BAD_FPU);
	  neon_dyadic_misc (NT_poly,
			    N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
	}
    }
}
18054
/* Handler for VQDMULH/VQRDMULH: saturating doubling multiply returning
   the high half.  Supports the NEON scalar form, the MVE Q,Q,R form and
   the plain three-register form.  */

static void
do_neon_qdmulh (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form: NEON only.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  /* MVE additionally allows S8 elements and the Q,Q,R shape.  */
	  rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	}

      NEON_ENCODE (INTEGER, inst);
      if (rs == NS_QQR)
	mve_encode_qqr (et.size, 0, 0);
      else
	/* The U bit (rounding) comes from bit mask.  */
	neon_three_same (neon_quad (rs), 0, et.size);
    }
}
18095
/* Handler for MVE VADDV{A}: add across vector into a general register,
   S/U element types up to 32 bits.  */

static void
do_mve_vaddv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);

  if (et.type == NT_invtype)
    first_error (BAD_EL_TYPE);

  /* Predicated inside a VPT block, plain MVE instruction otherwise.  */
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

  mve_encode_rq (et.type == NT_unsigned, et.size);
}
18115
/* Handler for MVE VHCADD: halving complex add with rotation.  The rotation
   immediate must be 90 or 270 and is encoded as a single bit.  */

static void
do_mve_vhcadd (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQI, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));

  if (et.size == 32 && inst.operands[0].reg == inst.operands[2].reg)
    as_tsktsk (_("Warning: 32-bit element size and same first and third "
		 "operand makes instruction UNPREDICTABLE"));

  mve_encode_qqq (0, et.size);
  /* Bit 12 distinguishes rotation 270 from 90.  */
  inst.instruction |= (rot == 270) << 12;
  inst.is_neon = 1;
}
18139
/* Handler for MVE VQDMULLB/VQDMULLT: saturating doubling multiply long,
   in both the Q,Q,Q and the Q,Q,R forms.  */

static void
do_mve_vqdmull (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);

  /* With 32-bit elements the destination overlapping a source is
     UNPREDICTABLE; warn but still assemble.  */
  if (et.size == 32
      && (inst.operands[0].reg == inst.operands[1].reg
	  || (rs == NS_QQQ && inst.operands[0].reg == inst.operands[2].reg)))
    as_tsktsk (BAD_MVE_SRCDEST);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (rs == NS_QQQ)
    {
      mve_encode_qqq (et.size == 32, 64);
      inst.instruction |= 1;
    }
  else
    {
      mve_encode_qqr (64, et.size == 32, 0);
      inst.instruction |= 0x3 << 5;
    }
}
18168
18169 static void
18170 do_mve_vadc (void)
18171 {
18172 enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
18173 struct neon_type_el et
18174 = neon_check_type (3, rs, N_KEY | N_I32, N_EQK, N_EQK);
18175
18176 if (et.type == NT_invtype)
18177 first_error (BAD_EL_TYPE);
18178
18179 if (inst.cond > COND_ALWAYS)
18180 inst.pred_insn_type = INSIDE_VPT_INSN;
18181 else
18182 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18183
18184 mve_encode_qqq (0, 64);
18185 }
18186
18187 static void
18188 do_mve_vbrsr (void)
18189 {
18190 enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
18191 struct neon_type_el et
18192 = neon_check_type (3, rs, N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
18193
18194 if (inst.cond > COND_ALWAYS)
18195 inst.pred_insn_type = INSIDE_VPT_INSN;
18196 else
18197 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18198
18199 mve_encode_qqr (et.size, 0, 0);
18200 }
18201
18202 static void
18203 do_mve_vsbc (void)
18204 {
18205 neon_check_type (3, NS_QQQ, N_EQK, N_EQK, N_I32 | N_KEY);
18206
18207 if (inst.cond > COND_ALWAYS)
18208 inst.pred_insn_type = INSIDE_VPT_INSN;
18209 else
18210 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18211
18212 mve_encode_qqq (1, 64);
18213 }
18214
18215 static void
18216 do_mve_vmulh (void)
18217 {
18218 enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
18219 struct neon_type_el et
18220 = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
18221
18222 if (inst.cond > COND_ALWAYS)
18223 inst.pred_insn_type = INSIDE_VPT_INSN;
18224 else
18225 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18226
18227 mve_encode_qqq (et.type == NT_unsigned, et.size);
18228 }
18229
18230 static void
18231 do_mve_vqdmlah (void)
18232 {
18233 enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
18234 struct neon_type_el et
18235 = neon_check_type (3, rs, N_EQK, N_EQK, N_S_32 | N_KEY);
18236
18237 if (inst.cond > COND_ALWAYS)
18238 inst.pred_insn_type = INSIDE_VPT_INSN;
18239 else
18240 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18241
18242 mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
18243 }
18244
18245 static void
18246 do_mve_vqdmladh (void)
18247 {
18248 enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
18249 struct neon_type_el et
18250 = neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
18251
18252 if (inst.cond > COND_ALWAYS)
18253 inst.pred_insn_type = INSIDE_VPT_INSN;
18254 else
18255 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18256
18257 mve_encode_qqq (0, et.size);
18258 }
18259
18260
/* Handler for VMULL-style mnemonics shared between NEON and MVE.  When the
   operands cannot be an MVE VMULLT, fall back to encoding a NEON VMUL via
   the neon_vmul label; otherwise encode the MVE multiply-long.  */

static void
do_mve_vmull (void)
{

  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_DDS,
					  NS_QQS, NS_QQQ, NS_QQR, NS_NULL);
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
      && inst.cond == COND_ALWAYS
      && ((unsigned)inst.instruction) == M_MNEM_vmullt)
    {
      if (rs == NS_QQQ)
	{
	  /* Q,Q,Q shapes are ambiguous: only re-dispatch to NEON VMUL for
	     the types MVE's vmullt cannot take.  */
	  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
						    N_SUF_32 | N_F64 | N_P8
						    | N_P16 | N_I_MVE | N_KEY);
	  if (((et.type == NT_poly) && et.size == 8
	       && ARM_CPU_IS_ANY (cpu_variant))
	      || (et.type == NT_integer) || (et.type == NT_float))
	    goto neon_vmul;
	}
      else
	goto neon_vmul;
    }

  constraint (rs != NS_QQQ, BAD_FPU);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
					    N_SU_32 | N_P8 | N_P16 | N_KEY);

  /* We are dealing with MVE's vmullt.  */
  if (et.size == 32
      && (inst.operands[0].reg == inst.operands[1].reg
	  || inst.operands[0].reg == inst.operands[2].reg))
    as_tsktsk (BAD_MVE_SRCDEST);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (et.type == NT_poly)
    mve_encode_qqq (neon_logbits (et.size), 64);
  else
    mve_encode_qqq (et.type == NT_unsigned, et.size);

  return;

 neon_vmul:
  /* Rewrite the instruction as a NEON VMUL and re-dispatch.  */
  inst.instruction = N_MNEM_vmul;
  inst.cond = 0xb;
  if (thumb_mode)
    inst.pred_insn_type = INSIDE_IT_INSN;
  do_neon_mul ();
}
18315
/* Handler for MVE VABAV: absolute difference and accumulate across vector
   into a general register.  */

static void
do_mve_vabav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);

  if (rs == NS_NULL)
    return;

  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  struct neon_type_el et = neon_check_type (2, NS_NULL, N_EQK, N_KEY | N_S8
					    | N_S16 | N_S32 | N_U8 | N_U16
					    | N_U32);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rqq (et.type == NT_unsigned, et.size);
}
18338
/* Handler for the MVE VMLADAV/VMLSDAV family: multiply-accumulate across
   vector into a general register.  The exchange and subtract variants only
   accept signed types, and the size bit is placed differently for the
   VMLSDAV forms.  */

static void
do_mve_vmladav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
					    N_EQK, N_EQK, N_SU_MVE | N_KEY);

  if (et.type == NT_unsigned
      && (inst.instruction == M_MNEM_vmladavx
	  || inst.instruction == M_MNEM_vmladavax
	  || inst.instruction == M_MNEM_vmlsdav
	  || inst.instruction == M_MNEM_vmlsdava
	  || inst.instruction == M_MNEM_vmlsdavx
	  || inst.instruction == M_MNEM_vmlsdavax))
    first_error (BAD_SIMD_TYPE);

  constraint (inst.operands[2].reg > 14,
	      _("MVE vector register in the range [Q0..Q7] expected"));

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The 8-bit size indicator bit sits at bit 28 for the VMLSDAV forms
     and at bit 8 otherwise.  */
  if (inst.instruction == M_MNEM_vmlsdav
      || inst.instruction == M_MNEM_vmlsdava
      || inst.instruction == M_MNEM_vmlsdavx
      || inst.instruction == M_MNEM_vmlsdavax)
    inst.instruction |= (et.size == 8) << 28;
  else
    inst.instruction |= (et.size == 8) << 8;

  mve_encode_rqq (et.type == NT_unsigned, 64);
  inst.instruction |= (et.size == 32) << 16;
}
18374
/* Handler for the MVE VMLALDAV/VMLSLDAV family: long multiply-accumulate
   across vector into a pair of general registers.  The subtract variants
   only accept signed types.  */

static void
do_mve_vmlaldav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRQQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (4, rs, N_EQK, N_EQK, N_EQK,
		       N_S16 | N_S32 | N_U16 | N_U32 | N_KEY);

  if (et.type == NT_unsigned
      && (inst.instruction == M_MNEM_vmlsldav
	  || inst.instruction == M_MNEM_vmlsldava
	  || inst.instruction == M_MNEM_vmlsldavx
	  || inst.instruction == M_MNEM_vmlsldavax))
    first_error (BAD_SIMD_TYPE);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rrqq (et.type == NT_unsigned, et.size);
}
18397
/* Handler for the MVE VRMLALDAVH/VRMLSLDAVH family.  The subtract and
   exchange variants are signed-only; SP as the odd GPR operand is a
   warning for the subtract forms but a hard error for the add forms
   (see the comment in the else branch).  */

static void
do_mve_vrmlaldavh (void)
{
  struct neon_type_el et;
  if (inst.instruction == M_MNEM_vrmlsldavh
      || inst.instruction == M_MNEM_vrmlsldavha
      || inst.instruction == M_MNEM_vrmlsldavhx
      || inst.instruction == M_MNEM_vrmlsldavhax)
    {
      et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK, N_S32 | N_KEY);
      if (inst.operands[1].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);
    }
  else
    {
      if (inst.instruction == M_MNEM_vrmlaldavhx
	  || inst.instruction == M_MNEM_vrmlaldavhax)
	et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK, N_S32 | N_KEY);
      else
	et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK,
			      N_U32 | N_S32 | N_KEY);
      /* vrmlaldavh's encoding with SP as the second, odd, GPR operand may alias
	 with vmax/min instructions, making the use of SP in assembly really
	 nonsensical, so instead of issuing a warning like we do for other uses
	 of SP for the odd register operand we error out.  */
      constraint (inst.operands[1].reg == REG_SP, BAD_SP);
    }

  /* Make sure we still check the second operand is an odd one and that PC is
     disallowed.  This because we are parsing for any GPR operand, to be able
     to distinguish between giving a warning or an error for SP as described
     above.  */
  constraint ((inst.operands[1].reg % 2) != 1, BAD_EVEN);
  constraint (inst.operands[1].reg == REG_PC, BAD_PC);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rrqq (et.type == NT_unsigned, 0);
}
18440
18441
/* Handler for MVE VMAXNMV/VMINNMV/VMAXNMAV/VMINNMAV: float max/min across
   vector into a general register.  SP/PC destinations are only
   UNPREDICTABLE, so warn rather than error.  */

static void
do_mve_vmaxnmv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (2, rs, N_EQK, N_F_MVE | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (inst.operands[0].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[0].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  mve_encode_rq (et.size == 16, 64);
}
18461
/* Handler for MVE VMAXV/VMINV/VMAXAV/VMINAV: integer max/min across vector
   into a general register.  The absolute variants are signed-only.  */

static void
do_mve_vmaxv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
  struct neon_type_el et;

  if (inst.instruction == M_MNEM_vmaxv || inst.instruction == M_MNEM_vminv)
    et = neon_check_type (2, rs, N_EQK, N_SU_MVE | N_KEY);
  else
    et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* SP/PC destinations are only UNPREDICTABLE: warn, don't error.  */
  if (inst.operands[0].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[0].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  mve_encode_rq (et.type == NT_unsigned, et.size);
}
18485
18486
/* Handler for VQRDMLAH/VQRDMLSH.  On non-MVE targets this requires the
   ARMv8.1 AdvSIMD extension (recorded as used); MVE targets use the
   Q,Q,R encoding instead.  */

static void
do_neon_qrdmlah (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* Check we're on the correct architecture.  */
      if (!mark_feature_used (&fpu_neon_ext_armv8))
	inst.error
	  = _("instruction form not available on this architecture.");
      else if (!mark_feature_used (&fpu_neon_ext_v8_1))
	{
	  as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
	  record_feature_use (&fpu_neon_ext_v8_1);
	}
      if (inst.operands[2].isscalar)
	{
	  /* NEON indexed-scalar form.  */
	  enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
	  struct neon_type_el et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	  NEON_ENCODE (SCALAR, inst);
	  neon_mul_mac (et, neon_quad (rs));
	}
      else
	{
	  /* NEON three-register form.  */
	  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  struct neon_type_el et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	  NEON_ENCODE (INTEGER, inst);
	  /* The U bit (rounding) comes from bit mask.  */
	  neon_three_same (neon_quad (rs), 0, et.size);
	}
    }
  else
    {
      /* MVE Q,Q,R form.  */
      enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
      struct neon_type_el et
	= neon_check_type (3, rs, N_EQK, N_EQK, N_S_32 | N_KEY);

      NEON_ENCODE (INTEGER, inst);
      mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
    }
}
18531
18532 static void
18533 do_neon_fcmp_absolute (void)
18534 {
18535 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
18536 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
18537 N_F_16_32 | N_KEY);
18538 /* Size field comes from bit mask. */
18539 neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
18540 }
18541
/* Handler for VACLE/VACLT: encoded as VACGE/VACGT with the source
   operands swapped.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
18548
18549 static void
18550 do_neon_step (void)
18551 {
18552 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
18553 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
18554 N_F_16_32 | N_KEY);
18555 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
18556 }
18557
/* Handler for VABS/VNEG.  Tries the VFP form first, then encodes the
   NEON/MVE two-register form for S32-or-narrower integer or F16/F32
   float types.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  if (!check_simd_pred_availability (et.type == NT_float,
				     NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
18584
/* Encode VSLI (shift left and insert).  MVE accepts only Q-register
   forms with 8/16/32-bit elements; plain Neon additionally allows
   D registers and 64-bit elements.  Valid shifts are 0 <= imm < size.  */

static void
do_neon_sli (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
    }

  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for insert"));
  /* The shift amount is encoded directly (not size - imm as for VSRI).  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
18610
/* Encode VSRI (shift right and insert).  Shapes/types mirror
   do_neon_sli; valid shifts are 1 <= imm <= size and the encoded
   immediate is size - imm.  */

static void
do_neon_sri (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
    }

  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}
18635
/* Encode VQSHLU (saturating shift left, unsigned result, signed
   operands) with an immediate shift of 0 <= imm < element size.  */

static void
do_neon_qshlu_imm (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK | N_UNS,
			    N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
    }

  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
18666
18667 static void
18668 do_neon_qmovn (void)
18669 {
18670 struct neon_type_el et = neon_check_type (2, NS_DQ,
18671 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
18672 /* Saturating move where operands can be signed or unsigned, and the
18673 destination has the same signedness. */
18674 NEON_ENCODE (INTEGER, inst);
18675 if (et.type == NT_unsigned)
18676 inst.instruction |= 0xc0;
18677 else
18678 inst.instruction |= 0x80;
18679 neon_two_same (0, 1, et.size / 2);
18680 }
18681
/* Encode VQMOVUN: saturating narrowing move with unsigned results;
   operands must be signed (S16/S32/S64).  */

static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
					    N_EQK | N_HLF | N_UNS,
					    N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results.  Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
18691
/* Encode VQSHRN/VQRSHRN (right shift, saturate, narrow).  A shift of #0
   is rewritten as the equivalent VQMOVN.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
					    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* The encoded shift is size - imm.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
18718
/* Encode VQSHRUN/VQRSHRUN (right shift, saturate, narrow to unsigned).
   A shift of #0 is rewritten as the equivalent VQMOVUN.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
					    N_EQK | N_HLF | N_UNS,
					    N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
18748
/* Encode VMOVN: narrowing move, Q source to D destination, with
   I16/I32/I64 source element types.  */

static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
					    N_EQK | N_HLF,
					    N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
18757
/* Encode VSHRN/VRSHRN (right shift and narrow).  A shift of #0 is
   rewritten as the equivalent VMOVN.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
					    N_EQK | N_HLF,
					    N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
18782
/* Encode VSHLL (shift left long).  The maximum-shift form (imm == size)
   has its own integer encoding; other shift amounts use the immediate
   encoding after a stricter type check.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
					    N_EQK | N_DBL,
					    N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
			    N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
18812
18813 /* Check the various types for the VCVT instruction, and return which version
18814 the current instruction is. */
18815
/* X-macro table of all supported VCVT conversion flavours.  Each CVT_VAR
   entry gives: flavour name, destination type bits, source type bits, a
   register/key modifier, and up to three VFP mnemonics (bitshift form,
   plain form, round-to-zero form; NULL where no VFP syntax exists).
   The table is expanded several times below with different CVT_VAR
   definitions to build the flavour enum and the mnemonic tables.  */
#define CVT_FLAVOUR_VAR							      \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs")   \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs")   \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL)	      \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL)	      \
  /* Half-precision conversions.  */					      \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL)		      \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL)		      \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */	      \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  CVT_VAR (bf16_f32, N_BF16, N_F32, whole_reg, NULL, NULL, NULL)	      \
  /* VFP instructions.  */						      \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL)		      \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL)		      \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
  /* VFP instructions with bitshift.  */				      \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL)	      \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL)	      \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL)	      \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL)	      \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL)	      \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL)	      \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL)	      \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

/* First expansion: each CVT_VAR becomes an enumerator.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* Flavours from f32_f64 onwards are the VFP-only ones.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
18865
/* Determine which conversion flavour the current instruction is, by
   trying neon_check_type against every CVT_VAR entry in turn.  Returns
   neon_cvt_flavour_invalid if none matches.  RS is the operand shape
   already selected for the instruction.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
  /* Second expansion of the table: a type probe per flavour.  Any
     inst.error set by a failed probe is cleared on the first match.  */
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
18891
/* Rounding-mode variants of the VCVT family, matching the mnemonic
   suffixes (VCVTA/VCVTN/VCVTP/VCVTM etc.; see the do_neon_cvt* wrappers
   below for which mode each mnemonic selects).  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,	/* VCVTA.  */
  neon_cvt_mode_n,	/* VCVTN.  */
  neon_cvt_mode_p,	/* VCVTP.  */
  neon_cvt_mode_m,	/* VCVTM.  */
  neon_cvt_mode_z,	/* Plain VCVT (round towards zero).  */
  neon_cvt_mode_x,	/* VCVTR-style (used by do_neon_cvtr).  */
  neon_cvt_mode_r
};
18902
18903 /* Neon-syntax VFP conversions. */
18904
/* Encode a Neon-syntax VCVT using the VFP opcode tables.  RS selects
   between the bitshift (fixed-point) and plain forms; FLAVOUR indexes
   the mnemonic tables built from CVT_FLAVOUR_VAR.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The fixed-point forms read and write the same register;
	     operand 2 (the immediate) moves down into slot 1.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  /* A NULL table entry means no VFP syntax exists for this flavour.  */
  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
18956
/* Encode the round-towards-zero VFP conversions (the "Z" column of the
   flavour table).  Flavours with no round-to-zero mnemonic are silently
   skipped.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
18973
/* Encode an FPv8 VCVT{A,N,P,M} (directed-rounding) conversion.
   FLAVOUR gives the source/destination types, MODE the rounding
   direction; only the a/n/p/m modes are valid here.  */

static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
		_(BAD_FP16));

  set_pred_insn_type (OUTSIDE_PRED_INSN);

  /* sz selects double-precision source; op selects signed result.  */
  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f16:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f16:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  /* rm encodes the rounding direction in bits [17:16].  */
  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      ||flavour == neon_cvt_flavour_u32_f16)
    do_scalar_fp16_v82_encode ();
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  /* Unconditional encoding.  */
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
19049
/* Worker for all the VCVT* mnemonics.  Selects the operand shape and
   conversion flavour, then dispatches: VFP-syntax encodings via
   do_vfp_nsyn_cvt / do_vfp_nsyn_cvtz / do_vfp_nsyn_cvt_fpv8, otherwise
   the Advanced SIMD / MVE encodings directly.  MODE is the rounding
   variant requested by the mnemonic (see enum neon_cvt_mode).  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_QQI:
      if (mode == neon_cvt_mode_z
	  && (flavour == neon_cvt_flavour_f16_s16
	      || flavour == neon_cvt_flavour_f16_u16
	      || flavour == neon_cvt_flavour_s16_f16
	      || flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_f32_u32
	      || flavour == neon_cvt_flavour_f32_s32
	      || flavour == neon_cvt_flavour_s32_f32
	      || flavour == neon_cvt_flavour_u32_f32))
	{
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH))
	    return;
	}
      else if (mode == neon_cvt_mode_n)
	{
	  /* We are dealing with vcvt with the 'ne' condition.  */
	  inst.cond = 0x1;
	  inst.instruction = N_MNEM_vcvt;
	  do_neon_cvt_1 (neon_cvt_mode_z);
	  return;
	}
      /* fall through.  */
    case NS_DDI:
      {
	unsigned immbits;
	/* Per-flavour opcode bits for the fixed-point encodings, indexed
	   by enum neon_cvt_flavour order.  */
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if ((rs != NS_QQI || !ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	    && vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	  {
	    /* MVE fixed-point shifts must be 1..element-size.  */
	    constraint (inst.operands[2].present && inst.operands[2].imm == 0,
			_("immediate value out of range"));
	    switch (flavour)
	      {
	      case neon_cvt_flavour_f16_s16:
	      case neon_cvt_flavour_f16_u16:
	      case neon_cvt_flavour_s16_f16:
	      case neon_cvt_flavour_u16_f16:
		constraint (inst.operands[2].imm > 16,
			    _("immediate value out of range"));
		break;
	      case neon_cvt_flavour_f32_u32:
	      case neon_cvt_flavour_f32_s32:
	      case neon_cvt_flavour_s32_f32:
	      case neon_cvt_flavour_u32_f32:
		constraint (inst.operands[2].imm > 32,
			    _("immediate value out of range"));
		break;
	      default:
		inst.error = BAD_FPU;
		return;
	      }
	  }

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* NOTE(review): bit 21 was already set just above, so this
	       second OR is redundant (but harmless).  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_QQ:
      if ((mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	   || mode == neon_cvt_mode_m || mode == neon_cvt_mode_p)
	  && (flavour == neon_cvt_flavour_s16_f16
	      || flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s32_f32
	      || flavour == neon_cvt_flavour_u32_f32))
	{
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH8))
	    return;
	}
      else if (mode == neon_cvt_mode_z
	       && (flavour == neon_cvt_flavour_f16_s16
		   || flavour == neon_cvt_flavour_f16_u16
		   || flavour == neon_cvt_flavour_s16_f16
		   || flavour == neon_cvt_flavour_u16_f16
		   || flavour == neon_cvt_flavour_f32_u32
		   || flavour == neon_cvt_flavour_f32_s32
		   || flavour == neon_cvt_flavour_s32_f32
		   || flavour == neon_cvt_flavour_u32_f32))
	{
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH))
	    return;
	}
      /* fall through.  */
    case NS_DD:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* Directed-rounding (VCVTA/N/P/M) SIMD encoding.  */
	  NEON_ENCODE (FLOAT, inst);
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH8))
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  {
	    /* Per-flavour opcode bits for the integer encodings.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	      {
		if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
		  return;
	      }

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      if (rs == NS_DQ)
	{
	  if (flavour == neon_cvt_flavour_bf16_f32)
	    {
	      if (vfp_or_neon_is_neon (NEON_CHECK_ARCH8) == FAIL)
		return;
	      constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
	      /* VCVT.bf16.f32.  */
	      inst.instruction = 0x11b60640;
	    }
	  else
	    /* VCVT.f16.f32.  */
	    inst.instruction = 0x3b60600;
	}
      else
	/* VCVT.f32.f16.  */
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
19335
/* VCVTR: convert using mode x.  */
static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
19341
/* VCVT: convert using round-towards-zero (mode z).  */
static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
19347
/* VCVTA: convert using rounding mode a.  */
static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
19353
/* VCVTN: convert using rounding mode n.  */
static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
19359
/* VCVTP: convert using rounding mode p.  */
static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
19365
/* VCVTM: convert using rounding mode m.  */
static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
19371
/* Common encoder for the VFP VCVTB/VCVTT forms.  T selects the top
   (bit 7) half; TO selects conversion direction (bit 16); IS_DOUBLE
   selects the double-precision variant (bit 8) and which operand gets
   a D-register encoding.  */

static void
do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
{
  if (is_double)
    mark_feature_used (&fpu_vfp_ext_armv8);

  encode_arm_vfp_reg (inst.operands[0].reg,
		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg,
		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= to ? 0x10000 : 0;
  inst.instruction |= t ? 0x80 : 0;
  inst.instruction |= is_double ? 0x100 : 0;
  do_vfp_cond_or_thumb ();
}
19387
/* Worker for VCVTB/VCVTT (T selects the top-half variant).  Handles the
   MVE Q-register forms directly; all scalar VFP combinations (f16<->f32,
   f16<->f64, bf16<-f32) are dispatched to do_neon_cvttb_2 by probing the
   operand types in turn.  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_QQ, NS_QQI, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (rs == NS_QQ || rs == NS_QQI)
    {
      int single_to_half = 0;
      if (!check_simd_pred_availability (TRUE, NEON_CHECK_ARCH))
	return;

      enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

      /* For MVE the integer<->float flavours are really plain VCVT;
	 re-dispatch through do_neon_cvt_1.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	  && (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16
	      || flavour == neon_cvt_flavour_f16_s16
	      || flavour == neon_cvt_flavour_f16_u16
	      || flavour == neon_cvt_flavour_u32_f32
	      || flavour == neon_cvt_flavour_s32_f32
	      || flavour == neon_cvt_flavour_f32_s32
	      || flavour == neon_cvt_flavour_f32_u32))
	{
	  inst.cond = 0xf;
	  inst.instruction = N_MNEM_vcvt;
	  set_pred_insn_type (INSIDE_VPT_INSN);
	  do_neon_cvt_1 (neon_cvt_mode_z);
	  return;
	}
      else if (rs == NS_QQ && flavour == neon_cvt_flavour_f32_f16)
	single_to_half = 1;
      else if (rs == NS_QQ && flavour != neon_cvt_flavour_f16_f32)
	{
	  first_error (BAD_FPU);
	  return;
	}

      /* MVE VCVTB/VCVTT f16<->f32 encoding.  */
      inst.instruction = 0xee3f0e01;
      inst.instruction |= single_to_half << 28;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 13;
      inst.instruction |= t << 12;
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 1;
      inst.is_neon = 1;
    }
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_BF16 | N_VFP, N_F32).type != NT_invtype)
    {
      constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
      inst.error = NULL;
      inst.instruction |= (1 << 8);
      inst.instruction &= ~(1 << 9);
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else
    return;
}
19478
/* VCVTB: bottom-half variant.  */
static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
19484
19485
/* VCVTT: top-half variant.  */
static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
19491
/* Encode the immediate form of VMOV/VMVN.  Searches for a cmode that
   can represent the immediate; if none fits, retries with the bits
   inverted and the opposite MOV/MVN sense.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
					    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  /* A 64-bit immediate arrives split across imm (low) and reg (high).  */
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV.  Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the op bit with the (possibly flipped) sense.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
19543
/* Encode VMVN: register form as a two-same-register integer operation,
   immediate form via neon_move_immediate.  MVE only has the Q-register
   form.  */

static void
do_neon_mvn (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[1].isreg)
    {
      enum neon_shape rs;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	rs = neon_select_shape (NS_QQ, NS_NULL);
      else
	rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      NEON_ENCODE (IMMED, inst);
      neon_move_immediate ();
    }

  neon_dp_fixup (&inst);

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* MVE restricts the immediate form to Q destinations.  */
      constraint (!inst.operands[1].isreg && !inst.operands[0].isquad, BAD_FPU);
    }
}
19578
19579 /* Encode instructions of form:
19580
19581 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
19582 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
19583
static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  /* Shared encoder for mixed-length (long/wide/narrow) three-register
     operations; bit layout is described in the comment above.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  /* U bit selects the unsigned variant; size is encoded as log2 of the
     element width in bits 20-21.  */
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
19598
static void
do_neon_dyadic_long (void)
{
  /* Encode VADDL/VSUBL/VABDL.  The Neon form is Qd = op (Dn, Dm); for
     MVE the mnemonics only exist as aliases of VADD/VSUB/VABD inside an
     IT block with LE/LT conditions (see below).  */
  enum neon_shape rs = neon_select_shape (NS_QDD, NS_QQQ, NS_QQR, NS_NULL);
  if (rs == NS_QDD)
    {
      if (vfp_or_neon_is_neon (NEON_CHECK_ARCH | NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (INTEGER, inst);
      /* FIXME: Type checking for lengthening op.  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
      neon_mixed_length (et, et.size);
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (inst.cond == 0xf || inst.cond == 0x10))
    {
      /* If parsing for MVE, vaddl/vsubl/vabdl{e,t} can only be vadd/vsub/vabd
	 in an IT block with le/lt conditions.  */

      /* 0xf was the "e" suffix parsed as a condition; remap it to LE
	 (0xb).  0x10 similarly maps the "t" suffix to LT (0xd).  */
      if (inst.cond == 0xf)
	inst.cond = 0xb;
      else if (inst.cond == 0x10)
	inst.cond = 0xd;

      inst.pred_insn_type = INSIDE_IT_INSN;

      /* Re-dispatch to the non-long encoder with the base mnemonic.  */
      if (inst.instruction == N_MNEM_vaddl)
	{
	  inst.instruction = N_MNEM_vadd;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vsubl)
	{
	  inst.instruction = N_MNEM_vsub;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vabdl)
	{
	  inst.instruction = N_MNEM_vabd;
	  do_neon_dyadic_if_su ();
	}
    }
  else
    first_error (BAD_FPU);
}
19646
static void
do_neon_abal (void)
{
  /* Encode VABAL (absolute difference and accumulate, long):
     Qd += |Dn - Dm| with doubled element width.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
19654
static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  /* Encode a long multiply(-accumulate) that may take either a scalar
     (Dm[x]) or a plain register as the final operand.  REGTYPES and
     SCALARTYPES are the permitted element-type masks for each form.
     NOTE(review): the masks appear swapped relative to the shapes they
     guard (REGTYPES checks the scalar NS_QDS form and vice versa) — this
     matches long-standing upstream behaviour; confirm before changing.  */
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
19673
static void
do_neon_mac_maybe_scalar_long (void)
{
  /* VMLAL/VMLSL and friends: long multiply-accumulate with 16/32-bit
     signed or unsigned elements, scalar or register final operand.  */
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
19679
19680 /* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
19681 internal SCALAR. QUAD_P is 1 if it's for Q format, otherwise it's 0. */
19682
static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  /* Produce the Rm field encoding for a GAS-internal SCALAR operand of
     vfmal/vfmsl.  QUAD_P is 1 for the Q-register form, 0 for the D form.
     On a scalar out of range, reports an error and returns 0.  */
  unsigned reg = NEON_SCALAR_REG (scalar);
  unsigned idx = NEON_SCALAR_INDEX (scalar);

  /* Q form allows S0-S7 with index 0-3; D form allows S0-S15 with
     index 0-1.  */
  unsigned reg_limit = quad_p ? 7 : 15;
  unsigned idx_limit = quad_p ? 3 : 1;

  if (reg > reg_limit || idx > idx_limit)
    {
      first_error (_("scalar out of range for multiply instruction"));
      return 0;
    }

  if (quad_p)
    /* Vm[2:0] = reg; bit 3 = idx bit 0; bit 5 = idx bit 1.  */
    return ((reg & 0x7)
	    | ((idx & 0x1) << 3)
	    | (((idx >> 1) & 0x1) << 5));

  /* D form: bit 5 = reg bit 0; Vm[2:0] = reg bits 3:1; bit 3 = idx.  */
  return (((reg & 0x1) << 5)
	  | ((reg >> 1) & 0x7)
	  | ((idx & 0x1) << 3));
}
19712
static void
do_neon_fmac_maybe_scalar_long (int subtype)
{
  /* Encode vfmal (SUBTYPE == 0) / vfmsl (SUBTYPE == 1): FP16 fused
     multiply-accumulate long, in three-same register form or with a
     scalar-indexed third operand.  */
  enum neon_shape rs;
  int high8;
  /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding. 'size"
     field (bits[21:20]) has different meaning. For scalar index variant, it's
     used to differentiate add and subtract, otherwise it's with fixed value
     0x2.  */
  int size = -1;

  /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
     be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      high8 = 0xfe000000;
      if (subtype)
	size = 16;
      rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      size = 32;
      if (subtype)
	inst.instruction |= (0x1 << 23);
      rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
    }


  if (inst.cond != COND_ALWAYS)
    as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
	       "behaviour is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
	      _(BAD_FP16));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* "opcode" from template has included "ubit", so simply pass 0 here. Also,
     the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
     so we simply pass -1 as size.  */
  unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
  neon_three_same (quad_p, 0, size);

  /* Undo neon_dp_fixup. Redo the high eight bits.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;

  /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
     whether the instruction is in Q form and whether Vm is a scalar indexed
     operand.  */
  if (inst.operands[2].isscalar)
    {
      unsigned rm
	= neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
      /* Clear the Rm field neon_three_same wrote and insert the
	 scalar-specific encoding.  */
      inst.instruction &= 0xffffffd0;
      inst.instruction |= rm;

      if (!quad_p)
	{
	  /* Redo Rn as well.  */
	  inst.instruction &= 0xfff0ff7f;
	  inst.instruction |= HI4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
	}
    }
  else if (!quad_p)
    {
      /* Redo Rn and Rm.  */
      inst.instruction &= 0xfff0ff50;
      inst.instruction |= HI4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI4 (inst.operands[2].reg);
      inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
    }
}
19791
static void
do_neon_vfmal (void)
{
  /* VFMAL: the accumulate ("add") flavour of the FP16 FMA-long pair.  */
  do_neon_fmac_maybe_scalar_long (0);
}
19797
static void
do_neon_vfmsl (void)
{
  /* VFMSL: the subtract flavour of the FP16 FMA-long pair.  */
  do_neon_fmac_maybe_scalar_long (1);
}
19803
static void
do_neon_dyadic_wide (void)
{
  /* Encode wide operations (VADDW/VSUBW): Qd = Qn op widen (Dm).  */
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
19811
static void
do_neon_dyadic_narrow (void)
{
  /* Encode narrowing operations (e.g. VADDHN/VSUBHN):
     Dd = narrow (Qn op Qm).  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  /* The size field encodes the *narrowed* element width.  */
  neon_mixed_length (et, et.size / 2);
}
19822
static void
do_neon_mul_sat_scalar_long (void)
{
  /* VQDMULL and friends: saturating doubling multiply long; only
     signed 16/32-bit element types are valid in either form.  */
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
19828
static void
do_neon_vmull (void)
{
  /* Encode VMULL: long multiply with integer or polynomial elements.
     The scalar form is handled by the generic long-MAC path.  */
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Force the size field to 0b10 (32) for P64.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
19860
static void
do_neon_ext (void)
{
  /* Encode VEXT: extract a byte-granular window spanning two vectors.
     The assembly immediate counts elements; the instruction immediate
     counts bytes, hence the scaling below.  */
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  /* Byte offset must stay inside one D (8 bytes) or Q (16 bytes)
     register.  */
  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
19882
static void
do_neon_rev (void)
{
  /* Encode VREV16/VREV32/VREV64: reverse elements within regions of the
     vector.  */
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    rs = neon_select_shape (NS_QQ, NS_NULL);
  else
    rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);

  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext) && elsize == 64
      && inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("Warning: 64-bit element size and same destination and source"
		 " operands makes instruction UNPREDICTABLE"));

  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
19914
static void
do_neon_dup (void)
{
  /* Encode VDUP: replicate either a vector scalar (Dm[x]) or an ARM
     core register across all lanes of the destination vector.  */
  if (inst.operands[1].isscalar)
    {
      /* Scalar source form is Neon-only (no MVE equivalent here).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1),
		  BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Index and size share the imm4 field: index is shifted above a
	 low set bit that encodes the element size.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* The Q form also exists in MVE; the D form needs real Neon.  */
      if (rs == NS_QR)
	{
	  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH))
	    return;
	}
      else
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1),
		    BAD_FPU);

      /* SP/PC sources are merely UNPREDICTABLE in MVE, so warn rather
	 than error.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  if (inst.operands[1].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[1].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);
	}

      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      switch (et.size)
	{
	case 8: inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
19984
static void
do_mve_mov (int toQ)
{
  /* Encode the MVE two-GPR <-> vector-lane-pair moves:
     VMOV <Rt>, <Rt2>, <Qd[idx]>, <Qd[idx2]> and the reverse.
     TOQ is nonzero when the Q register is the destination; it selects
     which parsed operands are the GPRs and which are the lanes.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = MVE_UNPREDICABLE_INSN;

  /* Operand slots: by default GPRs come first; swap for the to-Q form.  */
  unsigned Rt = 0, Rt2 = 1, Q0 = 2, Q1 = 3;
  if (toQ)
    {
      Q0 = 0;
      Q1 = 1;
      Rt = 2;
      Rt2 = 3;
    }

  constraint (inst.operands[Q0].reg != inst.operands[Q1].reg + 2,
	      _("Index one must be [2,3] and index two must be two less than"
		" index one."));
  constraint (inst.operands[Rt].reg == inst.operands[Rt2].reg,
	      _("General purpose registers may not be the same"));
  constraint (inst.operands[Rt].reg == REG_SP
	      || inst.operands[Rt2].reg == REG_SP,
	      BAD_SP);
  constraint (inst.operands[Rt].reg == REG_PC
	      || inst.operands[Rt2].reg == REG_PC,
	      BAD_PC);

  /* The lane operand packs register and index together; /32 recovers
     the Q register number and %4 the lane index.  */
  inst.instruction = 0xec000f00;
  inst.instruction |= HI1 (inst.operands[Q1].reg / 32) << 23;
  inst.instruction |= !!toQ << 20;
  inst.instruction |= inst.operands[Rt2].reg << 16;
  inst.instruction |= LOW4 (inst.operands[Q1].reg / 32) << 13;
  inst.instruction |= (inst.operands[Q1].reg % 4) << 4;
  inst.instruction |= inst.operands[Rt].reg;
}
20022
static void
do_mve_movn (void)
{
  /* Encode MVE VMOVN: narrow each element of Qm and write the halves
     into alternating lanes of Qd.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  struct neon_type_el et = neon_check_type (2, NS_QQ, N_EQK, N_I16 | N_I32
					    | N_KEY);

  /* Size field holds log2(element bits) - 1 (16 -> 0b11? no: 16 -> 3).
     NOTE(review): value is neon_logbits(size) - 1, i.e. 3 for 16-bit and
     4 for 32-bit elements, placed at bit 18.  */
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= (neon_logbits (et.size) - 1) << 18;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.is_neon = 1;

}
20045
20046 /* VMOV has particularly many variations. It can be one of:
20047 0. VMOV<c><q> <Qd>, <Qm>
20048 1. VMOV<c><q> <Dd>, <Dm>
20049 (Register operations, which are VORR with Rm = Rn.)
20050 2. VMOV<c><q>.<dt> <Qd>, #<imm>
20051 3. VMOV<c><q>.<dt> <Dd>, #<imm>
20052 (Immediate loads.)
20053 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
20054 (ARM register to scalar.)
20055 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
20056 (Two ARM registers to vector.)
20057 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
20058 (Scalar to ARM register.)
20059 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
20060 (Vector to two ARM registers.)
20061 8. VMOV.F32 <Sd>, <Sm>
20062 9. VMOV.F64 <Dd>, <Dm>
20063 (VFP register moves.)
20064 10. VMOV.F32 <Sd>, #imm
20065 11. VMOV.F64 <Dd>, #imm
20066 (VFP float immediate load.)
20067 12. VMOV <Rd>, <Sm>
20068 (VFP single to ARM reg.)
20069 13. VMOV <Sd>, <Rm>
20070 (ARM reg to VFP single.)
20071 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
20072 (Two ARM regs to two VFP singles.)
20073 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
20074 (Two VFP singles to two ARM regs.)
20075 16. VMOV<c> <Rt>, <Rt2>, <Qd[idx]>, <Qd[idx2]>
20076 17. VMOV<c> <Qd[idx]>, <Qd[idx2]>, <Rt>, <Rt2>
20077 18. VMOV<c>.<dt> <Rt>, <Qn[idx]>
20078 19. VMOV<c>.<dt> <Qd[idx]>, <Rt>
20079
20080 These cases can be disambiguated using neon_select_shape, except cases 1/9
20081 and 3/11 which depend on the operand type too.
20082
20083 All the encoded bits are hardcoded by this function.
20084
20085 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
20086 Cases 5, 7 may be used with VFPv2 and above.
20087
20088 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
20089 can specify a type where it doesn't make sense to, and is ignored). */
20090
static void
do_neon_mov (void)
{
  /* Dispatch the many VMOV variants (see the numbered table in the
     comment above).  The operand shapes disambiguate all cases except
     1/9 and 3/11, which also need the type suffix.  */
  enum neon_shape rs = neon_select_shape (NS_RRSS, NS_SSRR, NS_RRFF, NS_FFRR,
					  NS_DRR, NS_RRD, NS_QQ, NS_DD, NS_QI,
					  NS_DI, NS_SR, NS_RS, NS_FF, NS_FI,
					  NS_RF, NS_FR, NS_HR, NS_RH, NS_HI,
					  NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;

      /* In MVE we interpret the following instructions as same, so ignoring
	 the following type (float) and size (64) checks.
	 a: VMOV<c><q> <Dd>, <Dm>
	 b: VMOV<c><q>.F64 <Dd>, <Dm>.  */
      if ((et.type == NT_float && et.size == 64)
	  || (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)))
	{
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (!check_simd_pred_availability (FALSE,
					   NEON_CHECK_CC | NEON_CHECK_ARCH))
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* VORR uses the same register for both source operands.  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (!check_simd_pred_availability (FALSE,
					 NEON_CHECK_CC | NEON_CHECK_ARCH))
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	/* 8/16-bit transfers need Neon (or MVE); 32-bit only needs
	   VFPv1.  */
	if (et.size != 32)
	  {
	    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		&& vfp_or_neon_is_neon (NEON_CHECK_ARCH) == FAIL)
	      return;
	  }
	else
	  {
	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
			&& !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
			_(BAD_FPU));
	  }

	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	  {
	    if (inst.operands[1].reg == REG_SP)
	      as_tsktsk (MVE_BAD_SP);
	    else if (inst.operands[1].reg == REG_PC)
	      as_tsktsk (MVE_BAD_PC);
	  }
	/* D registers hold 64 bits; Q registers 128 — bounds the lane
	   index below.  */
	unsigned size = inst.operands[0].isscalar == 1 ? 64 : 128;

	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= size / et.size, _("scalar index out of range"));


	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	/* Fold the low lane-index bits into the opc field.  */
	bcdebits |= (x & ((1 << (3-logsize)) - 1)) << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= ((bcdebits >> 2) & 3) << 21;
	inst.instruction |= (x >> (3-logsize)) << 16;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	if (et.size != 32)
	  {
	    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		&& vfp_or_neon_is_neon (NEON_CHECK_CC
					| NEON_CHECK_ARCH) == FAIL)
	      return;
	  }
	else
	  {
	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
			&& !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
			_(BAD_FPU));
	  }

	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	  {
	    if (inst.operands[0].reg == REG_SP)
	      as_tsktsk (MVE_BAD_SP);
	    else if (inst.operands[0].reg == REG_PC)
	      as_tsktsk (MVE_BAD_PC);
	  }

	unsigned size = inst.operands[1].isscalar == 1 ? 64 : 128;

	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= size / et.size, _("scalar index out of range"));

	/* The U bit (bit 4 of abcdebits) selects the zero-extending
	   variant for 8/16-bit transfers.  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= (x & ((1 << (3-logsize)) - 1)) << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
	inst.instruction |= (x >> (3-logsize)) << 16;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
    encode_fconstd:
      if (!inst.operands[1].immisfloat)
	{
	  unsigned new_imm;
	  /* Immediate has to fit in 8 bits so float is enough.  */
	  float imm = (float) inst.operands[1].imm;
	  memcpy (&new_imm, &imm, sizeof (float));
	  /* But the assembly may have been written to provide an integer
	     bit pattern that equates to a float, so check that the
	     conversion has worked.  */
	  if (is_quarter_float (new_imm))
	    {
	      if (is_quarter_float (inst.operands[1].imm))
		as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));

	      inst.operands[1].imm = new_imm;
	      inst.operands[1].immisfloat = 1;
	    }
	}

      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_RRSS:
      do_mve_mov (0);
      break;
    case NS_SSRR:
      do_mve_mov (1);
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
20423
static void
do_mve_movl (void)
{
  /* Encode MVE VMOVL (widen each element of Qm into Qd).  If the
     operands don't match the Q,Q form, re-parse as a predicated VMOV
     ("vmovl" == "vmov" + "le" condition).  */
  if (!(inst.operands[0].present && inst.operands[0].isquad
      && inst.operands[1].present && inst.operands[1].isquad
      && !inst.operands[2].present))
    {
      inst.instruction = 0;
      inst.cond = 0xb;
      if (thumb_mode)
	set_pred_insn_type (INSIDE_IT_INSN);
      do_neon_mov ();
      return;
    }

  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  if (inst.cond != COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;

  struct neon_type_el et = neon_check_type (2, NS_QQ, N_EQK, N_S8 | N_U8
					    | N_S16 | N_U16 | N_KEY);

  /* U bit at 28; size encoded as log2(bits)+1 at bit 19.  */
  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= (neon_logbits (et.size) + 1) << 19;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.is_neon = 1;
}
20456
static void
do_neon_rshift_round_imm (void)
{
  /* Encode V{R}SHR: right shift by immediate (optionally rounding).
     The hardware encodes the shift amount as (element size - imm).  */
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_SU_MVE | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
    }
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}
20491
static void
do_neon_movhf (void)
{
  /* Encode the ARMv8.2 scalar FP16 VMOV.F16 <Sd>, <Sm> form.  */
  enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
  constraint (rs != NS_HH, _("invalid suffix"));

  if (inst.cond != COND_ALWAYS)
    {
      if (thumb_mode)
	{
	  /* In Thumb an IT block may have supplied the condition; the
	     result is only UNPREDICTABLE, so warn.  In ARM a condition
	     can't be encoded at all, so it is an error.  */
	  as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
		     " the behaviour is UNPREDICTABLE"));
	}
      else
	{
	  inst.error = BAD_COND;
	  return;
	}
    }

  do_vfp_sp_monadic ();

  /* Force the unconditional (0xF) prefix encoding.  */
  inst.is_neon = 1;
  inst.instruction |= 0xf0000000;
}
20517
static void
do_neon_movl (void)
{
  /* Encode Neon VMOVL: sign/zero-extend each element of Dm into Qd.
     The element size in bytes goes into bits 19-21.  */
  struct neon_type_el et = neon_check_type (2, NS_QD,
    N_EQK | N_DBL, N_SU_32 | N_KEY);
  unsigned sizebits = et.size >> 3;
  inst.instruction |= sizebits << 19;
  neon_two_same (0, et.type == NT_unsigned, -1);
}
20527
20528 static void
20529 do_neon_trn (void)
20530 {
20531 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20532 struct neon_type_el et = neon_check_type (2, rs,
20533 N_EQK, N_8 | N_16 | N_32 | N_KEY);
20534 NEON_ENCODE (INTEGER, inst);
20535 neon_two_same (neon_quad (rs), 1, et.size);
20536 }
20537
static void
do_neon_zip_uzp (void)
{
  /* Encode VZIP/VUZP (interleave / de-interleave).  */
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
20553
static void
do_neon_sat_abs_neg (void)
{
  /* Encode VQABS/VQNEG: saturating absolute value / negate, signed
     element types only.  */
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  enum neon_shape rs;
  /* MVE only has the Q-register form.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    rs = neon_select_shape (NS_QQ, NS_NULL);
  else
    rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
20569
/* Encode VPADDL/VPADAL: pairwise add (and accumulate) long.  */
static void
do_neon_pair_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (neon_quad (rs), 1, et.size);
}
20579
/* Encode reciprocal(-sqrt) estimate instructions (VRECPE/VRSQRTE):
   operate on F16/F32 or U32 elements.  */
static void
do_neon_recip_est (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
  /* Bit 8 selects the floating-point variant over the integer one.  */
  inst.instruction |= (et.type == NT_float) << 8;
  neon_two_same (neon_quad (rs), 1, et.size);
}
20589
/* Encode VCLS: count leading sign bits per signed element.  */
static void
do_neon_cls (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  /* MVE restricts this instruction to Q registers.  */
  enum neon_shape rs;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    rs = neon_select_shape (NS_QQ, NS_NULL);
  else
    rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
20606
20607 static void
20608 do_neon_clz (void)
20609 {
20610 if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
20611 return;
20612
20613 enum neon_shape rs;
20614 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20615 rs = neon_select_shape (NS_QQ, NS_NULL);
20616 else
20617 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20618
20619 struct neon_type_el et = neon_check_type (2, rs,
20620 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
20621 neon_two_same (neon_quad (rs), 1, et.size);
20622 }
20623
/* Encode VCNT: population count; only byte-sized elements are valid.  */
static void
do_neon_cnt (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_INT, N_8 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
20632
/* Encode VSWP: swap the contents of two D or Q registers (untyped,
   hence size -1).  */
static void
do_neon_swp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  neon_two_same (neon_quad (rs), 1, -1);
}
20639
/* Encode VTBL/VTBX: table lookup through a list of 1-4 D registers.  */
static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  /* List length is encoded as (length - 1) in bits [9:8].  */
  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
20663
/* Encode VLDM/VSTM: load/store a list of FP/NEON registers.  Single
   precision lists are redirected to the FLDM/FSTM path.  */
static void
do_neon_ldm_stm (void)
{
  /* Requires at least single-precision VFP, or MVE.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register contributes two words to the offset field.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
20696
20697 static void
20698 do_vfp_nsyn_pop (void)
20699 {
20700 nsyn_insert_sp ();
20701 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)) {
20702 return do_vfp_nsyn_opcode ("vldm");
20703 }
20704
20705 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd),
20706 _(BAD_FPU));
20707
20708 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
20709 _("register list must contain at least 1 and at most 16 "
20710 "registers"));
20711
20712 if (inst.operands[1].issingle)
20713 do_vfp_nsyn_opcode ("fldmias");
20714 else
20715 do_vfp_nsyn_opcode ("fldmiad");
20716 }
20717
20718 static void
20719 do_vfp_nsyn_push (void)
20720 {
20721 nsyn_insert_sp ();
20722 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)) {
20723 return do_vfp_nsyn_opcode ("vstmdb");
20724 }
20725
20726 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd),
20727 _(BAD_FPU));
20728
20729 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
20730 _("register list must contain at least 1 and at most 16 "
20731 "registers"));
20732
20733 if (inst.operands[1].issingle)
20734 do_vfp_nsyn_opcode ("fstmdbs");
20735 else
20736 do_vfp_nsyn_opcode ("fstmdbd");
20737 }
20738
20739
/* Encode VLDR/VSTR by delegating to the FLDS/FSTS (single) or
   FLDD/FSTD (double) pseudo-opcodes.  Bit 20 distinguishes loads.  */
static void
do_neon_ldr_str (void)
{
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
	inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
	as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");

      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
      if (inst.vectype.el[0].size == 16)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
20776
/* Encode the Thumb VLDR/VSTR (system register) form: transfers a
   system register to/from memory with an immediate offset.  */
static void
do_t_vldr_vstr_sysreg (void)
{
  int fp_vldr_bitno = 20, sysreg_vldr_bitno = 20;
  bfd_boolean is_vldr = ((inst.instruction & (1 << fp_vldr_bitno)) != 0);

  /* Use of PC is UNPREDICTABLE.  */
  if (inst.operands[1].reg == REG_PC)
    inst.error = _("Use of PC here is UNPREDICTABLE");

  /* Only immediate-offset addressing is accepted.  */
  if (inst.operands[1].immisreg)
    inst.error = _("instruction does not accept register index");

  if (!inst.operands[1].isreg)
    inst.error = _("instruction does not accept PC-relative addressing");

  /* Offset field holds a signed 7-bit (word-scaled) immediate.  */
  if (abs (inst.operands[1].imm) >= (1 << 7))
    inst.error = _("immediate value out of range");

  inst.instruction = 0xec000f80;
  if (is_vldr)
    inst.instruction |= 1 << sysreg_vldr_bitno;
  encode_arm_cp_address (1, TRUE, FALSE, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM);
  /* System register number is split between bits [15:13] and bit 22.  */
  inst.instruction |= (inst.operands[0].imm & 0x7) << 13;
  inst.instruction |= (inst.operands[0].imm & 0x8) << 19;
}
20803
/* Dispatch VLDR/VSTR: either the system-register form (first operand
   is not a register) or the ordinary FP register form.  */
static void
do_vldr_vstr (void)
{
  bfd_boolean sysreg_op = !inst.operands[0].isreg;

  /* VLDR/VSTR (System Register).  */
  if (sysreg_op)
    {
      if (!mark_feature_used (&arm_ext_v8_1m_main))
	as_bad (_("Instruction not permitted on this architecture"));

      do_t_vldr_vstr_sysreg ();
    }
  /* VLDR/VSTR.  */
  else
    {
      if (!mark_feature_used (&fpu_vfp_ext_v1xd)
	  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	as_bad (_("Instruction not permitted on this architecture"));
      do_neon_ldr_str ();
    }
}
20826
20827 /* "interleave" version also handles non-interleaving register VLD1/VST1
20828 instructions. */
20829
static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
      -1, -1, 0x8, 0x9, -1, -1, 0x3, -1,  /* VLD2 / VST2.  */
      -1, -1, -1, -1, 0x4, 0x5, -1, -1,   /* VLD3 / VST3.  */
      -1, -1, -1, -1, -1, -1, 0x0, 0x1    /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Map the byte alignment (imm >> 8) to the 2-bit align field,
     validating it against the register list length.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      BAD_EL_TYPE);

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
20895
20896 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
20897 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
20898 otherwise. The variable arguments are a list of pairs of legal (size, align)
20899 values, terminated with -1. */
20900
static int
neon_alignment_bit (int size, int align, int *do_alignment, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  /* No explicit alignment was given: nothing to validate.  */
  if (!inst.operands[1].immisalign)
    {
      *do_alignment = 0;
      return SUCCESS;
    }

  va_start (ap, do_alignment);

  /* Walk the (size, align) pairs until a match or the -1 sentinel.  */
  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
	break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
	result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_alignment = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
20936
/* Encode single-lane VLD<n>/VST<n>: load or store one element to/from
   a specific lane of 1-4 registers.  */
static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  /* <n> of VLD<n>/VST<n>, minus one, from bits [9:8] of the bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Each <n> has its own legal (size, align) pairs and align-field
     encoding.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
				       16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
21021
21022 /* Encode single n-element structure to all lanes VLD<n> instructions. */
21023
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  /* <n> of VLD<n> comes from bits [9:8] of the initial bitmask.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* List length 2 is flagged in bit 5; longer lists are invalid.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* Size 32 with 128-bit alignment has a special size encoding.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* Alignment flag lives in bit 4.  */
  inst.instruction |= do_alignment << 4;
}
21096
21097 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
21098 apart from bits [11:4]. */
21099
21100 static void
21101 do_neon_ldx_stx (void)
21102 {
21103 if (inst.operands[1].isreg)
21104 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
21105
21106 switch (NEON_LANE (inst.operands[0].imm))
21107 {
21108 case NEON_INTERLEAVE_LANES:
21109 NEON_ENCODE (INTERLV, inst);
21110 do_neon_ld_st_interleave ();
21111 break;
21112
21113 case NEON_ALL_LANES:
21114 NEON_ENCODE (DUP, inst);
21115 if (inst.instruction == N_INV)
21116 {
21117 first_error ("only loads support such operands");
21118 break;
21119 }
21120 do_neon_ld_dup ();
21121 break;
21122
21123 default:
21124 NEON_ENCODE (LANE, inst);
21125 do_neon_ld_st_lane ();
21126 }
21127
21128 /* L bit comes from bit mask. */
21129 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
21130 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
21131 inst.instruction |= inst.operands[1].reg << 16;
21132
21133 if (inst.operands[1].postind)
21134 {
21135 int postreg = inst.operands[1].imm & 0xf;
21136 constraint (!inst.operands[1].immisreg,
21137 _("post-index must be a register"));
21138 constraint (postreg == 0xd || postreg == 0xf,
21139 _("bad register for post-index"));
21140 inst.instruction |= postreg;
21141 }
21142 else
21143 {
21144 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
21145 constraint (inst.relocs[0].exp.X_op != O_constant
21146 || inst.relocs[0].exp.X_add_number != 0,
21147 BAD_ADDR_MODE);
21148
21149 if (inst.operands[1].writeback)
21150 {
21151 inst.instruction |= 0xd;
21152 }
21153 else
21154 inst.instruction |= 0xf;
21155 }
21156
21157 if (thumb_mode)
21158 inst.instruction |= 0xf9000000;
21159 else
21160 inst.instruction |= 0xf4000000;
21161 }
21162
21163 /* FP v8. */
/* FP v8.  */
/* Common encoder for the FP v8 three-operand scalar instructions,
   dispatching on the selected shape (half/single/double).  */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    do_vfp_dp_rd_rn_rm ();

  /* Bit 8 selects the double-precision variant.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  inst.instruction |= 0xf0000000;
}
21191
/* Encode VSEL<cond>: FP v8 conditional select, valid only outside an
   IT/VPT block.  */
static void
do_vsel (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
    first_error (_("invalid instruction shape"));
}
21200
/* Encode VMAXNM/VMINNM: tries the VFP scalar FP v8 form first, then
   falls back to the NEON vector form.  */
static void
do_vmaxnm (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
    return;

  if (!check_simd_pred_availability (TRUE, NEON_CHECK_CC | NEON_CHECK_ARCH8))
    return;

  neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
}
21215
/* Common encoder for the VRINT family; MODE selects the rounding
   variant.  Tries the scalar VFP encoding first, then the NEON vector
   encoding.  */
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The a/n/p/m variants carry their rounding mode in the
	 instruction and must not be conditional.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_pred_insn_type (OUTSIDE_PRED_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Bit 8 selects double precision.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      if (!check_simd_pred_availability (TRUE,
					 NEON_CHECK_CC | NEON_CHECK_ARCH8))
	return;

      NEON_ENCODE (FLOAT, inst);

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* Rounding mode lives in bits [9:7] of the NEON encoding.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
21307
/* Assemble VRINTX (mode X).  */
static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}

/* Assemble VRINTZ (mode Z).  */
static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}

/* Assemble VRINTR (mode R).  */
static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}

/* Assemble VRINTA (mode A).  */
static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}

/* Assemble VRINTN (mode N).  */
static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}

/* Assemble VRINTP (mode P).  */
static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}

/* Assemble VRINTM (mode M).  */
static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
21349
/* Validate and encode a scalar operand for VCMLA.  Returns the
   combined register/index encoding, or 0 after reporting an error via
   first_error.  */
static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (opnd);
  unsigned idx = NEON_SCALAR_INDEX (opnd);

  switch (elsize)
    {
    case 16:
      /* Half precision: index 0-1, registers D0-D15 only.  */
      if (idx < 2 && reg < 16)
	return reg | (idx << 4);
      break;
    case 32:
      /* Single precision: only index 0 is encodable.  */
      if (idx == 0)
	return reg;
      break;
    default:
      break;
    }

  first_error (_("scalar out of range"));
  return 0;
}
21364
/* Encode VCMLA: complex multiply-accumulate with a rotation of
   0/90/180/270 degrees, in either indexed-scalar or vector form.  */
static void
do_vcmla (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext)
	      && (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8)
		  || !mark_feature_used (&arm_ext_v8_3)), (BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));
  /* Rotation is encoded as a 2-bit multiple of 90 degrees.  */
  rot /= 90;

  if (!check_simd_pred_availability (TRUE,
				     NEON_CHECK_ARCH8 | NEON_CHECK_CC))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form (not available under MVE).  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	first_error (_("invalid instruction shape"));
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      /* Vector form; MVE only supports Q registers.  */
      enum neon_shape rs;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	rs = neon_select_shape (NS_QQQI, NS_NULL);
      else
	rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);

      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext) && size == 32
	  && (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg))
	as_tsktsk (BAD_MVE_SRCDEST);

      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc200800;
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
21424
/* Encode VCADD: complex add with a rotation of 90 or 270 degrees, in
   either the NEON/FP form or the MVE (integer-capable) form.  */
static void
do_vcadd (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	      && (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8)
		  || !mark_feature_used (&arm_ext_v8_3)), (BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));

  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs;
  struct neon_type_el et;
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16 | N_F32);
    }
  else
    {
      /* MVE additionally accepts integer element types, Q regs only.  */
      rs = neon_select_shape (NS_QQQI, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16 | N_F32 | N_I8
			    | N_I16 | N_I32);
      if (et.size == 32 && inst.operands[0].reg == inst.operands[2].reg)
	as_tsktsk (_("Warning: 32-bit element size and same first and third "
		     "operand makes instruction UNPREDICTABLE"));
    }

  if (et.type == NT_invtype)
    return;

  if (!check_simd_pred_availability (et.type == NT_float,
				     NEON_CHECK_ARCH8 | NEON_CHECK_CC))
    return;

  if (et.type == NT_float)
    {
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc800800;
      /* Bit 24 selects the 270-degree rotation.  */
      inst.instruction |= (rot == 270) << 24;
      inst.instruction |= (et.size == 32) << 20;
    }
  else
    {
      /* Integer form is MVE-only; rotation moves to bit 12.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      inst.instruction = 0xfe000f00;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= neon_logbits (et.size) << 20;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= (rot == 270) << 12;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.is_neon = 1;
    }
}
21483
21484 /* Dot Product instructions encoding support. */
21485
/* Common encoder for VSDOT/VUDOT.  UNSIGNED_P is non-zero for the
   unsigned variant.  */
static void
do_neon_dotproduct (int unsigned_p)
{
  enum neon_shape rs;
  unsigned scalar_oprd2 = 0;
  int high8;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
	       "is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* Dot Product instructions are in three-same D/Q register format or the third
     operand can be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
      high8 = 0xfe000000;
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
    }

  if (unsigned_p)
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
  else
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);

  /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
     Product instruction, so we pass 0 as the "ubit" parameter.  And the
     "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter.  */
  neon_three_same (neon_quad (rs), 0, 32);

  /* Undo neon_dp_fixup.  Dot Product instructions are using a slightly
     different NEON three-same encoding.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;
  /* Encode 'U' bit which indicates signedness.  */
  inst.instruction |= (unsigned_p ? 1 : 0) << 4;
  /* Re-encode operand2 if it's indexed scalar operand.  What has been encoded
     from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
     the instruction encoding.  */
  if (inst.operands[2].isscalar)
    {
      inst.instruction &= 0xffffffd0;
      inst.instruction |= LOW4 (scalar_oprd2);
      inst.instruction |= HI1 (scalar_oprd2) << 5;
    }
}
21540
21541 /* Dot Product instructions for signed integer. */
21542
/* Assemble the signed dot product (VSDOT).  */
static void
do_neon_dotproduct_s (void)
{
  return do_neon_dotproduct (0);
}
21548
21549 /* Dot Product instructions for unsigned integer. */
21550
/* Assemble the unsigned dot product (VUDOT).  */
static void
do_neon_dotproduct_u (void)
{
  return do_neon_dotproduct (1);
}
21556
/* Encode VUSDOT: mixed-sign dot product, vector or indexed-scalar
   form.  */
static void
do_vusdot (void)
{
  enum neon_shape rs;
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (inst.operands[2].isscalar)
    {
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);

      inst.instruction |= (1 << 25);
      /* GAS packs the scalar index into the low nibble of the reg.  */
      int index = inst.operands[2].reg & 0xf;
      constraint ((index != 1 && index != 0), _("index must be 0 or 1"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 16),
		  _("indexed register must be less than 16"));
      neon_three_args (rs == NS_QQS);
      inst.instruction |= (index << 5);
    }
  else
    {
      inst.instruction |= (1 << 21);
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);
      neon_three_args (rs == NS_QQQ);
    }
}
21584
/* Encode VSUDOT.  Only the indexed-scalar form is handled here; there
   is no else branch for a vector form.  */
static void
do_vsudot (void)
{
  enum neon_shape rs;
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (inst.operands[2].isscalar)
    {
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_U8 | N_KEY);

      inst.instruction |= (1 << 25);
      /* GAS packs the scalar index into the low nibble of the reg.  */
      int index = inst.operands[2].reg & 0xf;
      constraint ((index != 1 && index != 0), _("index must be 0 or 1"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 16),
		  _("indexed register must be less than 16"));
      neon_three_args (rs == NS_QQS);
      inst.instruction |= (index << 5);
    }
}
21605
/* Encode VSMMLA (signed 8-bit variant; Q registers only, per NS_QQQ).
   The type check runs before predication setup so that a type error is
   the diagnostic reported first.  */

static void
do_vsmmla (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);

  set_pred_insn_type (OUTSIDE_PRED_INSN);

  neon_three_args (1);

}
21617
/* Encode VUMMLA (unsigned 8-bit twin of do_vsmmla; Q registers only).  */

static void
do_vummla (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_U8 | N_KEY);

  set_pred_insn_type (OUTSIDE_PRED_INSN);

  neon_three_args (1);

}
21629
/* Validate the general-purpose register operand at position INDEX of a
   custom (CDE) instruction.  For the non-dual Thumb forms the register
   may be r0-r12 or r14, or APSR_nzcv (which the parser represents as a
   vector "register" numbered REG_PC); the dual forms require an even
   register in r0-r10.  */

static void
check_cde_operand (size_t index, int is_dual)
{
  unsigned Rx = inst.operands[index].reg;
  bfd_boolean isvec = inst.operands[index].isvec;
  if (is_dual == 0 && thumb_mode)
    constraint (
	!((Rx <= 14 && Rx != 13) || (Rx == REG_PC && isvec)),
	_("Register must be r0-r14 except r13, or APSR_nzcv."));
  else
    constraint ( !((Rx <= 10 && Rx % 2 == 0 )),
	_("Register must be an even register between r0-r10."));
}
21643
21644 static bfd_boolean
21645 cde_coproc_enabled (unsigned coproc)
21646 {
21647 switch (coproc)
21648 {
21649 case 0: return mark_feature_used (&arm_ext_cde0);
21650 case 1: return mark_feature_used (&arm_ext_cde1);
21651 case 2: return mark_feature_used (&arm_ext_cde2);
21652 case 3: return mark_feature_used (&arm_ext_cde3);
21653 case 4: return mark_feature_used (&arm_ext_cde4);
21654 case 5: return mark_feature_used (&arm_ext_cde5);
21655 case 6: return mark_feature_used (&arm_ext_cde6);
21656 case 7: return mark_feature_used (&arm_ext_cde7);
21657 default: return FALSE;
21658 }
21659 }
21660
21661 #define cde_coproc_pos 8
21662 static void
21663 cde_handle_coproc (void)
21664 {
21665 unsigned coproc = inst.operands[0].reg;
21666 constraint (coproc > 7, _("CDE Coprocessor must be in range 0-7"));
21667 constraint (!(cde_coproc_enabled (coproc)), BAD_CDE_COPROC);
21668 inst.instruction |= coproc << cde_coproc_pos;
21669 }
21670 #undef cde_coproc_pos
21671
21672 static void
21673 cxn_handle_predication (bfd_boolean is_accum)
21674 {
21675 if (is_accum && conditional_insn ())
21676 set_pred_insn_type (INSIDE_IT_INSN);
21677 else if (conditional_insn ())
21678 /* conditional_insn essentially checks for a suffix, not whether the
21679 instruction is inside an IT block or not.
21680 The non-accumulator versions should not have suffixes. */
21681 inst.error = BAD_SYNTAX;
21682 else
21683 set_pred_insn_type (OUTSIDE_PRED_INSN);
21684 }
21685
/* Encode the CX1 family (cx1, cx1a, cx1d, cx1da).  IS_DUAL selects the
   dual-destination 'd' form, IS_ACCUM the accumulator 'a' form.  Operand
   order is: coproc, Rd[, Rd+1], #imm.  */

static void
do_custom_instruction_1 (int is_dual, bfd_boolean is_accum)
{

  constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));

  unsigned imm, Rd;

  Rd = inst.operands[1].reg;
  check_cde_operand (1, is_dual);

  if (is_dual == 1)
    {
      constraint (inst.operands[2].reg != Rd + 1,
		  _("cx1d requires consecutive destination registers."));
      imm = inst.operands[3].imm;
    }
  else if (is_dual == 0)
    imm = inst.operands[2].imm;
  else
    abort ();

  /* The 13-bit immediate is scattered over three fields.  */
  inst.instruction |= Rd << 12;
  inst.instruction |= (imm & 0x1F80) << 9;
  inst.instruction |= (imm & 0x0040) << 1;
  inst.instruction |= (imm & 0x003f);

  cde_handle_coproc ();
  cxn_handle_predication (is_accum);
}
21716
/* Encode the CX2 family (cx2, cx2a, cx2d, cx2da): like CX1 but with a
   source register Rn and a 10-bit immediate.  IS_DUAL selects the
   dual-destination form, IS_ACCUM the accumulator form.  */

static void
do_custom_instruction_2 (int is_dual, bfd_boolean is_accum)
{

  constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));

  unsigned imm, Rd, Rn;

  Rd = inst.operands[1].reg;

  if (is_dual == 1)
    {
      constraint (inst.operands[2].reg != Rd + 1,
		  _("cx2d requires consecutive destination registers."));
      imm = inst.operands[4].imm;
      Rn = inst.operands[3].reg;
    }
  else if (is_dual == 0)
    {
      imm = inst.operands[3].imm;
      Rn = inst.operands[2].reg;
    }
  else
    abort ();

  /* Rn is checked with the non-dual rules even for the dual forms.  */
  check_cde_operand (2 + is_dual, /* is_dual = */0);
  check_cde_operand (1, is_dual);

  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;

  /* The immediate is scattered over three fields.  */
  inst.instruction |= (imm & 0x0380) << 13;
  inst.instruction |= (imm & 0x0040) << 1;
  inst.instruction |= (imm & 0x003f);

  cde_handle_coproc ();
  cxn_handle_predication (is_accum);
}
21755
/* Encode the CX3 family (cx3, cx3a, cx3d, cx3da): two source registers
   Rn/Rm and a 6-bit immediate.  Note the destination lands in bits 0-3
   here, unlike CX1/CX2 where it occupies bits 12-15.  */

static void
do_custom_instruction_3 (int is_dual, bfd_boolean is_accum)
{

  constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));

  unsigned imm, Rd, Rn, Rm;

  Rd = inst.operands[1].reg;

  if (is_dual == 1)
    {
      constraint (inst.operands[2].reg != Rd + 1,
		  _("cx3d requires consecutive destination registers."));
      imm = inst.operands[5].imm;
      Rn = inst.operands[3].reg;
      Rm = inst.operands[4].reg;
    }
  else if (is_dual == 0)
    {
      imm = inst.operands[4].imm;
      Rn = inst.operands[2].reg;
      Rm = inst.operands[3].reg;
    }
  else
    abort ();

  check_cde_operand (1, is_dual);
  /* Rn and Rm are checked with the non-dual rules even for dual forms.  */
  check_cde_operand (2 + is_dual, /* is_dual = */0);
  check_cde_operand (3 + is_dual, /* is_dual = */0);

  inst.instruction |= Rd;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm << 12;

  /* The 6-bit immediate is scattered over three fields.  */
  inst.instruction |= (imm & 0x0038) << 17;
  inst.instruction |= (imm & 0x0004) << 5;
  inst.instruction |= (imm & 0x0003) << 4;

  cde_handle_coproc ();
  cxn_handle_predication (is_accum);
}
21798
/* Entry points for the CXn custom instructions.  Each forwards to the
   shared encoder for its operand count with (is_dual, is_accum) flags.  */

static void
do_cx1 (void)
{
  do_custom_instruction_1 (0, 0);
}

static void
do_cx1a (void)
{
  do_custom_instruction_1 (0, 1);
}

static void
do_cx1d (void)
{
  do_custom_instruction_1 (1, 0);
}

static void
do_cx1da (void)
{
  do_custom_instruction_1 (1, 1);
}

static void
do_cx2 (void)
{
  do_custom_instruction_2 (0, 0);
}

static void
do_cx2a (void)
{
  do_custom_instruction_2 (0, 1);
}

static void
do_cx2d (void)
{
  do_custom_instruction_2 (1, 0);
}

static void
do_cx2da (void)
{
  do_custom_instruction_2 (1, 1);
}

static void
do_cx3 (void)
{
  do_custom_instruction_3 (0, 0);
}

static void
do_cx3a (void)
{
  do_custom_instruction_3 (0, 1);
}

static void
do_cx3d (void)
{
  do_custom_instruction_3 (1, 0);
}

static void
do_cx3da (void)
{
  do_custom_instruction_3 (1, 1);
}
21870
21871 static void
21872 vcx_assign_vec_d (unsigned regnum)
21873 {
21874 inst.instruction |= HI4 (regnum) << 12;
21875 inst.instruction |= LOW1 (regnum) << 22;
21876 }
21877
21878 static void
21879 vcx_assign_vec_m (unsigned regnum)
21880 {
21881 inst.instruction |= HI4 (regnum);
21882 inst.instruction |= LOW1 (regnum) << 5;
21883 }
21884
21885 static void
21886 vcx_assign_vec_n (unsigned regnum)
21887 {
21888 inst.instruction |= HI4 (regnum) << 16;
21889 inst.instruction |= LOW1 (regnum) << 7;
21890 }
21891
21892 enum vcx_reg_type {
21893 q_reg,
21894 d_reg,
21895 s_reg
21896 };
21897
21898 static enum vcx_reg_type
21899 vcx_get_reg_type (enum neon_shape ns)
21900 {
21901 gas_assert (ns == NS_PQI
21902 || ns == NS_PDI
21903 || ns == NS_PFI
21904 || ns == NS_PQQI
21905 || ns == NS_PDDI
21906 || ns == NS_PFFI
21907 || ns == NS_PQQQI
21908 || ns == NS_PDDDI
21909 || ns == NS_PFFFI);
21910 if (ns == NS_PQI || ns == NS_PQQI || ns == NS_PQQQI)
21911 return q_reg;
21912 if (ns == NS_PDI || ns == NS_PDDI || ns == NS_PDDDI)
21913 return d_reg;
21914 return s_reg;
21915 }
21916
#define vcx_size_pos 24
#define vcx_vec_pos 6
/* Set the size/vec bit implied by REG_TYPE and return the factor by which
   a register number must be scaled before being placed in the Vd:D (or
   Vn:N / Vm:M) fields -- see the NOTE below for why this works.  */
static unsigned
vcx_handle_shape (enum vcx_reg_type reg_type)
{
  unsigned mult = 2;
  if (reg_type == q_reg)
    inst.instruction |= 1 << vcx_vec_pos;
  else if (reg_type == d_reg)
    inst.instruction |= 1 << vcx_size_pos;
  else
    mult = 1;
  /* NOTE:
     The documentation says that the Q registers are encoded as 2*N in the D:Vd
     bits (or equivalent for N and M registers).
     Similarly the D registers are encoded as N in D:Vd bits.
     While the S registers are encoded as N in the Vd:D bits.

     Taking into account the maximum values of these registers we can see a
     nicer pattern for calculation:
       Q -> 7, D -> 15, S -> 31

     If we say that everything is encoded in the Vd:D bits, then we can say
     that Q is encoded as 4*N, and D is encoded as 2*N.
     This way the bits will end up the same, and calculation is simpler.
     (calculation is now:
	1. Multiply by a number determined by the register letter.
	2. Encode resulting number in Vd:D bits.)

     This is made a little more complicated by automatic handling of 'Q'
     registers elsewhere, which means the register number is already 2*N where
     N is the number the user wrote after the register letter.
     */
  return mult;
}
#undef vcx_vec_pos
#undef vcx_size_pos
21954
21955 static void
21956 vcx_ensure_register_in_range (unsigned R, enum vcx_reg_type reg_type)
21957 {
21958 if (reg_type == q_reg)
21959 {
21960 gas_assert (R % 2 == 0);
21961 constraint (R >= 16, _("'q' register must be in range 0-7"));
21962 }
21963 else if (reg_type == d_reg)
21964 constraint (R >= 16, _("'d' register must be in range 0-15"));
21965 else
21966 constraint (R >= 32, _("'s' register must be in range 0-31"));
21967 }
21968
/* Dispatch table of field encoders, indexed as used by
   vcx_handle_register_arguments: slot 0 writes the Vd fields, slot 1 the
   Vm fields and slot 2 the Vn fields.  */
static void (*vcx_assign_vec[3]) (unsigned) = {
  vcx_assign_vec_d,
  vcx_assign_vec_m,
  vcx_assign_vec_n
};
21974
/* Range-check and encode the NUM_REGISTERS vector register operands
   (inst.operands[1..NUM_REGISTERS]) of a vcx instruction.  Operand 1
   always goes to the Vd fields.  With two registers operand 2 goes to Vm;
   with three registers operand 2 goes to Vn and operand 3 to Vm, which is
   why the dispatch indices below are swapped for i > 0.  */

static void
vcx_handle_register_arguments (unsigned num_registers,
			       enum vcx_reg_type reg_type)
{
  unsigned R, i;
  unsigned reg_mult = vcx_handle_shape (reg_type);
  for (i = 0; i < num_registers; i++)
    {
      R = inst.operands[i+1].reg;
      vcx_ensure_register_in_range (R, reg_type);
      if (num_registers == 3 && i > 0)
	{
	  if (i == 2)
	    vcx_assign_vec[1] (R * reg_mult);   /* Third register -> Vm.  */
	  else
	    vcx_assign_vec[2] (R * reg_mult);   /* Second register -> Vn.  */
	  continue;
	}
      vcx_assign_vec[i](R * reg_mult);
    }
}
21996
21997 static void
21998 vcx_handle_insn_block (enum vcx_reg_type reg_type)
21999 {
22000 if (reg_type == q_reg)
22001 if (inst.cond > COND_ALWAYS)
22002 inst.pred_insn_type = INSIDE_VPT_INSN;
22003 else
22004 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
22005 else if (inst.cond == COND_ALWAYS)
22006 inst.pred_insn_type = OUTSIDE_PRED_INSN;
22007 else
22008 inst.error = BAD_NOT_IT;
22009 }
22010
22011 static void
22012 vcx_handle_common_checks (unsigned num_args, enum neon_shape rs)
22013 {
22014 constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));
22015 cde_handle_coproc ();
22016 enum vcx_reg_type reg_type = vcx_get_reg_type (rs);
22017 vcx_handle_register_arguments (num_args, reg_type);
22018 vcx_handle_insn_block (reg_type);
22019 if (reg_type == q_reg)
22020 constraint (!mark_feature_used (&mve_ext),
22021 _("vcx instructions with Q registers require MVE"));
22022 else
22023 constraint (!(ARM_FSET_CPU_SUBSET (armv8m_fp, cpu_variant)
22024 && mark_feature_used (&armv8m_fp))
22025 && !mark_feature_used (&mve_ext),
22026 _("vcx instructions with S or D registers require either MVE"
22027 " or Armv8-M floating point etension."));
22028 }
22029
/* Encode VCX1 (CDE): one vector register and an immediate scattered over
   the instruction word.  The Q form accepts a 12-bit immediate; the S/D
   forms are limited to 11 bits by the constraint below.  */

static void
do_vcx1 (void)
{
  enum neon_shape rs = neon_select_shape (NS_PQI, NS_PDI, NS_PFI, NS_NULL);
  vcx_handle_common_checks (1, rs);

  unsigned imm = inst.operands[2].imm;
  inst.instruction |= (imm & 0x03f);
  inst.instruction |= (imm & 0x040) << 1;
  inst.instruction |= (imm & 0x780) << 9;
  if (rs != NS_PQI)
    constraint (imm >= 2048,
		_("vcx1 with S or D registers takes immediate within 0-2047"));
  inst.instruction |= (imm & 0x800) << 13;
}
22045
/* Encode VCX2 (CDE): destination and source vector registers plus an
   immediate scattered over the word.  The Q form accepts a 7-bit
   immediate; the S/D forms are limited to 6 bits by the constraint
   below.  */

static void
do_vcx2 (void)
{
  enum neon_shape rs = neon_select_shape (NS_PQQI, NS_PDDI, NS_PFFI, NS_NULL);
  vcx_handle_common_checks (2, rs);

  unsigned imm = inst.operands[3].imm;
  inst.instruction |= (imm & 0x01) << 4;
  inst.instruction |= (imm & 0x02) << 6;
  inst.instruction |= (imm & 0x3c) << 14;
  if (rs != NS_PQQI)
    constraint (imm >= 64,
		_("vcx2 with S or D registers takes immediate within 0-63"));
  inst.instruction |= (imm & 0x40) << 18;
}
22061
22062 static void
22063 do_vcx3 (void)
22064 {
22065 enum neon_shape rs = neon_select_shape (NS_PQQQI, NS_PDDDI, NS_PFFFI, NS_NULL);
22066 vcx_handle_common_checks (3, rs);
22067
22068 unsigned imm = inst.operands[4].imm;
22069 inst.instruction |= (imm & 0x1) << 4;
22070 inst.instruction |= (imm & 0x6) << 19;
22071 if (rs != NS_PQQQI)
22072 constraint (imm >= 8,
22073 _("vcx2 with S or D registers takes immediate within 0-7"));
22074 inst.instruction |= (imm & 0x8) << 21;
22075 }
22076
/* Crypto v1 instructions.  */

/* Encode a two-operand (Qd, Qm) crypto instruction.  ELTTYPE is the
   element type required of the source operand; OP is the value for the
   op field, or -1 when the instruction has no op field (sha1h).  The
   Thumb or ARM unconditional prefix is OR'd in at the end.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  /* Clear any residual error left behind by the type check.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
22102
/* Encode a three-operand (Qd, Qn, Qm) crypto instruction with 32-bit
   elements.  U is the value of the u bit; OP is folded into the size
   argument of neon_three_same as 8 << op.  */
static void
do_crypto_3op_1 (int u, int op)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  /* Clear any residual error left behind by the type check.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
22117
/* Entry points for the v8 Crypto extension AES/SHA instructions.  Each
   forwards to the generic two- or three-operand crypto encoder with the
   (element type, op) or (u, op) pair for that mnemonic.  */

static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}

static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}

static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}

static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
22201
22202 static void
22203 do_crc32_1 (unsigned int poly, unsigned int sz)
22204 {
22205 unsigned int Rd = inst.operands[0].reg;
22206 unsigned int Rn = inst.operands[1].reg;
22207 unsigned int Rm = inst.operands[2].reg;
22208
22209 set_pred_insn_type (OUTSIDE_PRED_INSN);
22210 inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
22211 inst.instruction |= LOW4 (Rn) << 16;
22212 inst.instruction |= LOW4 (Rm);
22213 inst.instruction |= sz << (thumb_mode ? 4 : 21);
22214 inst.instruction |= poly << (thumb_mode ? 20 : 9);
22215
22216 if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
22217 as_warn (UNPRED_REG ("r15"));
22218 }
22219
/* CRC32 entry points.  The first argument of do_crc32_1 selects the
   polynomial (0: crc32, 1: crc32c) and the second the access size
   (0: byte, 1: halfword, 2: word).  */

static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}

static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}

static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}

static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}

static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}

static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
22255
/* Encode VJCVT: convert an F64 source to an S32 result.  Requires the
   Armv8 VFP extension; the heavy lifting is done by the generic
   single/double convert encoder.  */

static void
do_vjcvt (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
22265
/* Encode the BF16 VDOT instruction, in both its indexed (scalar) and
   vector forms.  Mirrors do_vusdot but with BF16 operand types and an
   explicit Armv8 Neon requirement.  */

static void
do_vdot (void)
{
  enum neon_shape rs;
  constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (inst.operands[2].isscalar)
    {
      /* Indexed form: the third operand is Dm[index].  */
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);

      inst.instruction |= (1 << 25);
      /* The parsed scalar carries the lane number in its low nibble and
	 the register number above it.  */
      int index = inst.operands[2].reg & 0xf;
      constraint ((index != 1 && index != 0), _("index must be 0 or 1"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 16),
		  _("indexed register must be less than 16"));
      neon_three_args (rs == NS_QQS);
      inst.instruction |= (index << 5);
    }
  else
    {
      /* Vector form.  */
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
      neon_three_args (rs == NS_QQQ);
    }
}
22293
/* Encode the BF16 VMMLA instruction (Q registers only, per NS_QQQ).
   Requires Armv8 Neon.  */

static void
do_vmmla (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);

  constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  neon_three_args (1);
}
22305
22306 \f
22307 /* Overall per-instruction processing. */
22308
/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.  We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.  */

/* Record a fixup of SIZE bytes at offset WHERE in FRAG against expression
   EXP with relocation type RELOC; PC_REL is non-zero for pc-relative
   fixups.  */

static void
fix_new_arm (fragS * frag,
	     int where,
	     short int size,
	     expressionS * exp,
	     int pc_rel,
	     int reloc)
{
  fixS * new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex gets wrapped in an expression symbol.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
22369
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Split the operand expression into the symbol + offset pair that
     frag_var wants.  */
  switch (inst.relocs[0].exp.X_op)
    {
    case O_symbol:
      sym = inst.relocs[0].exp.X_add_symbol;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.relocs[0].exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
22401
22402 /* Write a 32-bit thumb instruction to buf. */
22403 static void
22404 put_thumb32_insn (char * buf, unsigned long insn)
22405 {
22406 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
22407 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
22408 }
22409
/* Emit the assembled instruction held in inst: report any pending error
   (STR is the source line, used only in the diagnostic), hand relaxable
   instructions to output_relax_insn, otherwise write the encoded bytes,
   register a fixup for every pending relocation, and emit debug line
   info.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb instructions are written halfword-swapped.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* NOTE(review): double-size ARM case emits the same word twice --
	 presumably for the instructions encoded that way; confirm with the
	 opcode table users of inst.size == 8.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    {
      if (inst.relocs[r].type != BFD_RELOC_UNUSED)
	fix_new_arm (frag_now, to - frag_now->fr_literal,
		     inst.size, & inst.relocs[r].exp, inst.relocs[r].pc_rel,
		     inst.relocs[r].type);
    }

  dwarf2_emit_insn (inst.size);
}
22460
22461 static char *
22462 output_it_inst (int cond, int mask, char * to)
22463 {
22464 unsigned long instruction = 0xbf00;
22465
22466 mask &= 0xf;
22467 instruction |= mask;
22468 instruction |= cond << 4;
22469
22470 if (to == NULL)
22471 {
22472 to = frag_more (2);
22473 #ifdef OBJ_ELF
22474 dwarf2_emit_insn (2);
22475 #endif
22476 }
22477
22478 md_number_to_chars (to, instruction, 2);
22479
22480 return to;
22481 }
22482
/* Tag values used in struct asm_opcode's tag field.  They describe where
   (if anywhere) a conditional affix may appear in a mnemonic; see
   opcode_lookup for how they are interpreted.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a scalar
			   conditional suffix, others place 0xF where the
			   condition field would be, others take a vector
			   conditional suffix.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
22517
22518 /* Subroutine of md_assemble, responsible for looking up the primary
22519 opcode from the mnemonic the user wrote. STR points to the
22520 beginning of the mnemonic.
22521
22522 This is not simply a hash table lookup, because of conditional
22523 variants. Most instructions have conditional variants, which are
22524 expressed with a _conditional affix_ to the mnemonic. If we were
22525 to encode each conditional variant as a literal string in the opcode
22526 table, it would have approximately 20,000 entries.
22527
22528 Most mnemonics take this affix as a suffix, and in unified syntax,
22529 'most' is upgraded to 'all'. However, in the divided syntax, some
22530 instructions take the affix as an infix, notably the s-variants of
22531 the arithmetic instructions. Of those instructions, all but six
22532 have the infix appear after the third character of the mnemonic.
22533
22534 Accordingly, the algorithm for looking up primary opcodes given
22535 an identifier is:
22536
22537 1. Look up the identifier in the opcode table.
22538 If we find a match, go to step U.
22539
22540 2. Look up the last two characters of the identifier in the
22541 conditions table. If we find a match, look up the first N-2
22542 characters of the identifier in the opcode table. If we
22543 find a match, go to step CE.
22544
22545 3. Look up the fourth and fifth characters of the identifier in
22546 the conditions table. If we find a match, extract those
22547 characters from the identifier, and look up the remaining
22548 characters in the opcode table. If we find a match, go
22549 to step CM.
22550
22551 4. Fail.
22552
22553 U. Examine the tag field of the opcode structure, in case this is
22554 one of the six instructions with its conditional infix in an
22555 unusual place. If it is, the tag tells us where to find the
22556 infix; look it up in the conditions table and set inst.cond
22557 accordingly. Otherwise, this is an unconditional instruction.
22558 Again set inst.cond accordingly. Return the opcode structure.
22559
22560 CE. Examine the tag field to make sure this is an instruction that
22561 should receive a conditional suffix. If it is not, fail.
22562 Otherwise, set inst.cond from the suffix we already looked up,
22563 and return the opcode structure.
22564
22565 CM. Examine the tag field to make sure this is an instruction that
22566 should receive a conditional infix after the third character.
22567 If it is not, fail. Otherwise, undo the edits to the current
22568 line of input and proceed as for case CE. */
22569
/* Look up the opcode for the mnemonic at *STR, following the algorithm
   described in the comment above.  On success, *STR is advanced past the
   mnemonic (and any width suffix), and inst.cond, inst.size_req and
   inst.vectype may be updated as side effects.  Returns NULL on
   failure.  */

static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  /* Not one of the odd-infix special cases, so this spelling is
	     unconditional.  */
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      /* The tag encodes the character index of the infix.  */
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* A vector predication code is a single character, so the mnemonic
	 proper needs at least one character before it.  */
      if (end - base < 2)
	return NULL;
      affix = end - 1;
      cond = (const struct asm_cond *) hash_find_n (arm_vcond_hsh, affix, 1);
      opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
							affix - base);
      /* If this opcode can not be vector predicated then don't accept it with a
	 vector predication code.  */
      if (opcode && !opcode->mayBeVecPred)
	opcode = NULL;
    }
  if (!opcode || !cond)
    {
      /* A conditional suffix is two characters, so anything shorter than
	 three characters cannot be mnemonic plus suffix.  */
      if (end - base < 3)
	return NULL;

      /* Look for suffixed mnemonic.  */
      affix = end - 2;
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
							affix - base);
    }

  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return NULL;
	  /* Fall through.  */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily splice the infix out of the input line, look up the
     remainder, then restore the line for any later error reporting.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
22744
22745 /* This function generates an initial IT instruction, leaving its block
22746 virtually open for the new instructions. Eventually,
22747 the mask will be updated by now_pred_add_mask () each time
22748 a new instruction needs to be included in the IT block.
22749 Finally, the block is closed with close_automatic_it_block ().
22750 The block closure can be requested either from md_assemble (),
22751 a tencode (), or due to a label hook. */
22752
22753 static void
22754 new_automatic_it_block (int cond)
22755 {
22756 now_pred.state = AUTOMATIC_PRED_BLOCK;
22757 now_pred.mask = 0x18;
22758 now_pred.cc = cond;
22759 now_pred.block_length = 1;
22760 mapping_state (MAP_THUMB);
22761 now_pred.insn = output_it_inst (cond, now_pred.mask, NULL);
22762 now_pred.warn_deprecated = FALSE;
22763 now_pred.insn_cond = TRUE;
22764 }
22765
22766 /* Close an automatic IT block.
22767 See comments in new_automatic_it_block (). */
22768
/* Close an automatic IT block: mark the mask as complete (0x10 means
   no further slots) and reset the block length.
   See comments in new_automatic_it_block ().  */

static void
close_automatic_it_block (void)
{
  now_pred.mask = 0x10;
  now_pred.block_length = 0;
}
22775
22776 /* Update the mask of the current automatically-generated IT
22777 instruction. See comments in new_automatic_it_block (). */
22778
22779 static void
22780 now_pred_add_mask (int cond)
22781 {
22782 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
22783 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
22784 | ((bitvalue) << (nbit)))
22785 const int resulting_bit = (cond & 1);
22786
22787 now_pred.mask &= 0xf;
22788 now_pred.mask = SET_BIT_VALUE (now_pred.mask,
22789 resulting_bit,
22790 (5 - now_pred.block_length));
22791 now_pred.mask = SET_BIT_VALUE (now_pred.mask,
22792 1,
22793 ((5 - now_pred.block_length) - 1));
22794 output_it_inst (now_pred.cc, now_pred.mask, now_pred.insn);
22795
22796 #undef CLEAR_BIT
22797 #undef SET_BIT_VALUE
22798 }
22799
/* The IT blocks handling machinery is accessed through these functions:
22801 it_fsm_pre_encode () from md_assemble ()
22802 set_pred_insn_type () optional, from the tencode functions
22803 set_pred_insn_type_last () ditto
22804 in_pred_block () ditto
22805 it_fsm_post_encode () from md_assemble ()
22806 force_automatic_it_block_close () from label handling functions
22807
22808 Rationale:
22809 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
22810 initializing the IT insn type with a generic initial value depending
22811 on the inst.condition.
22812 2) During the tencode function, two things may happen:
22813 a) The tencode function overrides the IT insn type by
22814 calling either set_pred_insn_type (type) or
22815 set_pred_insn_type_last ().
22816 b) The tencode function queries the IT block state by
22817 calling in_pred_block () (i.e. to determine narrow/not narrow mode).
22818
22819 Both set_pred_insn_type and in_pred_block run the internal FSM state
22820 handling function (handle_pred_state), because: a) setting the IT insn
22821 type may incur in an invalid state (exiting the function),
22822 and b) querying the state requires the FSM to be updated.
22823 Specifically we want to avoid creating an IT block for conditional
22824 branches, so it_fsm_pre_encode is actually a guess and we can't
22825 determine whether an IT block is required until the tencode () routine
     has decided what type of instruction this actually is.
22827 Because of this, if set_pred_insn_type and in_pred_block have to be
22828 used, set_pred_insn_type has to be called first.
22829
22830 set_pred_insn_type_last () is a wrapper of set_pred_insn_type (type),
22831 that determines the insn IT type depending on the inst.cond code.
22832 When a tencode () routine encodes an instruction that can be
22833 either outside an IT block, or, in the case of being inside, has to be
22834 the last one, set_pred_insn_type_last () will determine the proper
22835 IT instruction type based on the inst.cond code. Otherwise,
22836 set_pred_insn_type can be called for overriding that logic or
22837 for covering other cases.
22838
22839 Calling handle_pred_state () may not transition the IT block state to
22840 OUTSIDE_PRED_BLOCK immediately, since the (current) state could be
22841 still queried. Instead, if the FSM determines that the state should
22842 be transitioned to OUTSIDE_PRED_BLOCK, a flag is marked to be closed
22843 after the tencode () function: that's what it_fsm_post_encode () does.
22844
22845 Since in_pred_block () calls the state handling function to get an
22846 updated state, an error may occur (due to invalid insns combination).
22847 In that case, inst.error is set.
22848 Therefore, inst.error has to be checked after the execution of
22849 the tencode () routine.
22850
22851 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
22852 any pending state change (if any) that didn't take place in
22853 handle_pred_state () as explained above. */
22854
22855 static void
22856 it_fsm_pre_encode (void)
22857 {
22858 if (inst.cond != COND_ALWAYS)
22859 inst.pred_insn_type = INSIDE_IT_INSN;
22860 else
22861 inst.pred_insn_type = OUTSIDE_PRED_INSN;
22862
22863 now_pred.state_handled = 0;
22864 }
22865
22866 /* IT state FSM handling function. */
22867 /* MVE instructions and non-MVE instructions are handled differently because of
22868 the introduction of VPT blocks.
22869 Specifications say that any non-MVE instruction inside a VPT block is
22870 UNPREDICTABLE, with the exception of the BKPT instruction. Whereas most MVE
22871 instructions are deemed to be UNPREDICTABLE if inside an IT block. For the
22872 few exceptions we have MVE_UNPREDICABLE_INSN.
22873 The error messages provided depending on the different combinations possible
22874 are described in the cases below:
22875 For 'most' MVE instructions:
22876 1) In an IT block, with an IT code: syntax error
22877 2) In an IT block, with a VPT code: error: must be in a VPT block
22878 3) In an IT block, with no code: warning: UNPREDICTABLE
22879 4) In a VPT block, with an IT code: syntax error
22880 5) In a VPT block, with a VPT code: OK!
22881 6) In a VPT block, with no code: error: missing code
22882 7) Outside a pred block, with an IT code: error: syntax error
22883 8) Outside a pred block, with a VPT code: error: should be in a VPT block
22884 9) Outside a pred block, with no code: OK!
22885 For non-MVE instructions:
22886 10) In an IT block, with an IT code: OK!
22887 11) In an IT block, with a VPT code: syntax error
22888 12) In an IT block, with no code: error: missing code
22889 13) In a VPT block, with an IT code: error: should be in an IT block
22890 14) In a VPT block, with a VPT code: syntax error
22891 15) In a VPT block, with no code: UNPREDICTABLE
22892 16) Outside a pred block, with an IT code: error: should be in an IT block
22893 17) Outside a pred block, with a VPT code: syntax error
22894 18) Outside a pred block, with no code: OK!
22895 */
22896
22897
/* Run one step of the IT/VPT predication state machine for the
   instruction currently described by INST.  Returns SUCCESS or FAIL;
   every FAIL path sets inst.error first.  The "Case N" comments refer
   to the numbered table in the comment block above.  */
static int
handle_pred_state (void)
{
  now_pred.state_handled = 1;
  now_pred.insn_cond = FALSE;

  switch (now_pred.state)
    {
    case OUTSIDE_PRED_BLOCK:
      switch (inst.pred_insn_type)
	{
	case MVE_UNPREDICABLE_INSN:
	case MVE_OUTSIDE_PRED_INSN:
	  if (inst.cond < COND_ALWAYS)
	    {
	      /* Case 7: Outside a pred block, with an IT code: error: syntax
		 error.  */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  /* Case 9:  Outside a pred block, with no code: OK!  */
	  break;
	case OUTSIDE_PRED_INSN:
	  if (inst.cond > COND_ALWAYS)
	    {
	      /* Case 17:  Outside a pred block, with a VPT code: syntax error.
	       */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  /* Case 18: Outside a pred block, with no code: OK!  */
	  break;

	case INSIDE_VPT_INSN:
	  /* Case 8: Outside a pred block, with a VPT code: error: should be in
	     a VPT block.  */
	  inst.error = BAD_OUT_VPT;
	  return FAIL;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (inst.cond < COND_ALWAYS)
	    {
	      /* Case 16: Outside a pred block, with an IT code: error: should
		 be in an IT block.  */
	      if (thumb_mode == 0)
		{
		  if (unified_syntax
		      && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		    as_tsktsk (_("Warning: conditional outside an IT block"\
				 " for Thumb."));
		}
	      else
		{
		  if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      /* Automatically generate the IT instruction.  */
		      new_automatic_it_block (inst.cond);
		      if (inst.pred_insn_type == INSIDE_IT_LAST_INSN)
			close_automatic_it_block ();
		    }
		  else
		    {
		      inst.error = BAD_OUT_IT;
		      return FAIL;
		    }
		}
	      break;
	    }
	  else if (inst.cond > COND_ALWAYS)
	    {
	      /* Case 17: Outside a pred block, with a VPT code: syntax error.
	       */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  else
	    gas_assert (0);
	  /* NOTE(review): reaches the next case only if gas_assert were
	     compiled out; every real path above breaks or returns.  */
	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case VPT_INSN:
	  if (inst.cond != COND_ALWAYS)
	    first_error (BAD_SYNTAX);
	  /* A VPT/VPST opens a manually-managed vector predication block.  */
	  now_pred.state = MANUAL_PRED_BLOCK;
	  now_pred.block_length = 0;
	  now_pred.type = VECTOR_PRED;
	  now_pred.cc = 0;
	  break;
	case IT_INSN:
	  /* An explicit IT opens a manually-managed scalar block.  */
	  now_pred.state = MANUAL_PRED_BLOCK;
	  now_pred.block_length = 0;
	  now_pred.type = SCALAR_PRED;
	  break;
	}
      break;

    case AUTOMATIC_PRED_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.pred_insn_type)
	{
	case INSIDE_VPT_INSN:
	case VPT_INSN:
	case MVE_UNPREDICABLE_INSN:
	case MVE_OUTSIDE_PRED_INSN:
	  /* MVE/VPT insns never appear inside an automatic (scalar) block.  */
	  gas_assert (0);
	case OUTSIDE_PRED_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_pred_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_pred.block_length++;

	  if (now_pred.block_length > 4
	      || !now_pred_compatible (inst.cond))
	    {
	      /* Incompatible condition or block full: close it, and start
		 a new block unless this insn must stay outside one.  */
	      force_automatic_it_block_close ();
	      if (inst.pred_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_pred.insn_cond = TRUE;
	      now_pred_add_mask (inst.cond);
	    }

	  if (now_pred.state == AUTOMATIC_PRED_BLOCK
	      && (inst.pred_insn_type == INSIDE_IT_LAST_INSN
		  || inst.pred_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	  /* NOTE(review): no fall through here -- the break above ends the
	     previous case.  */
	case NEUTRAL_IT_INSN:
	  now_pred.block_length++;
	  now_pred.insn_cond = TRUE;

	  if (now_pred.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_pred_add_mask (now_pred.cc & 1);
	  break;

	case IT_INSN:
	  /* An explicit IT ends the automatic block and starts a manual
	     one.  */
	  close_automatic_it_block ();
	  now_pred.state = MANUAL_PRED_BLOCK;
	  break;
	}
      break;

    case MANUAL_PRED_BLOCK:
      {
	int cond, is_last;
	if (now_pred.type == SCALAR_PRED)
	  {
	    /* Check conditional suffixes.  */
	    cond = now_pred.cc ^ ((now_pred.mask >> 4) & 1) ^ 1;
	    now_pred.mask <<= 1;
	    now_pred.mask &= 0x1f;
	    is_last = (now_pred.mask == 0x10);
	  }
	else
	  {
	    now_pred.cc ^= (now_pred.mask >> 4);
	    cond = now_pred.cc + 0xf;
	    now_pred.mask <<= 1;
	    now_pred.mask &= 0x1f;
	    is_last = now_pred.mask == 0x10;
	  }
	now_pred.insn_cond = TRUE;

	switch (inst.pred_insn_type)
	  {
	  case OUTSIDE_PRED_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 12: In an IT block, with no code: error: missing
		       code.  */
		    inst.error = BAD_NOT_IT;
		    return FAIL;
		  }
		else if (inst.cond > COND_ALWAYS)
		  {
		    /* Case 11: In an IT block, with a VPT code: syntax error.
		     */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else if (thumb_mode)
		  {
		    /* This is for some special cases where a non-MVE
		       instruction is not allowed in an IT block, such as cbz,
		       but are put into one with a condition code.
		       You could argue this should be a syntax error, but we
		       gave the 'not allowed in IT block' diagnostic in the
		       past so we will keep doing so.  */
		    inst.error = BAD_NOT_IT;
		    return FAIL;
		  }
		break;
	      }
	    else
	      {
		/* Case 15: In a VPT block, with no code: UNPREDICTABLE.  */
		as_tsktsk (MVE_NOT_VPT);
		return SUCCESS;
	      }
	  case MVE_OUTSIDE_PRED_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 3: In an IT block, with no code: warning:
		       UNPREDICTABLE.  */
		    as_tsktsk (MVE_NOT_IT);
		    return SUCCESS;
		  }
		else if (inst.cond < COND_ALWAYS)
		  {
		    /* Case 1: In an IT block, with an IT code: syntax error.
		     */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else
		  gas_assert (0);
	      }
	    else
	      {
		if (inst.cond < COND_ALWAYS)
		  {
		    /* Case 4: In a VPT block, with an IT code: syntax error.
		     */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 6: In a VPT block, with no code: error: missing
		       code.  */
		    inst.error = BAD_NOT_VPT;
		    return FAIL;
		  }
		else
		  {
		    gas_assert (0);
		  }
	      }
	  case MVE_UNPREDICABLE_INSN:
	    as_tsktsk (now_pred.type == SCALAR_PRED ? MVE_NOT_IT : MVE_NOT_VPT);
	    return SUCCESS;
	  case INSIDE_IT_INSN:
	    if (inst.cond > COND_ALWAYS)
	      {
		/* Case 11: In an IT block, with a VPT code: syntax error.  */
		/* Case 14: In a VPT block, with a VPT code: syntax error.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	    else if (now_pred.type == SCALAR_PRED)
	      {
		/* Case 10: In an IT block, with an IT code: OK!  */
		if (cond != inst.cond)
		  {
		    inst.error = now_pred.type == SCALAR_PRED ? BAD_IT_COND :
		      BAD_VPT_COND;
		    return FAIL;
		  }
	      }
	    else
	      {
		/* Case 13: In a VPT block, with an IT code: error: should be
		   in an IT block.  */
		inst.error = BAD_OUT_IT;
		return FAIL;
	      }
	    break;

	  case INSIDE_VPT_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		/* Case 2: In an IT block, with a VPT code: error: must be in a
		   VPT block.  */
		inst.error = BAD_OUT_VPT;
		return FAIL;
	      }
	    /* Case 5:  In a VPT block, with a VPT code: OK!  */
	    else if (cond != inst.cond)
	      {
		inst.error = BAD_VPT_COND;
		return FAIL;
	      }
	    break;
	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (now_pred.type == VECTOR_PRED || inst.cond > COND_ALWAYS)
	      {
		/* Case 4: In a VPT block, with an IT code: syntax error.  */
		/* Case 11: In an IT block, with a VPT code: syntax error.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	    else if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in a IT or VPT
	       block.  */
	    break;

	  case IT_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		inst.error = BAD_IT_IT;
		return FAIL;
	      }
	    /* fall through.  */
	  case VPT_INSN:
	    if (inst.cond == COND_ALWAYS)
	      {
		/* Executing a VPT/VPST instruction inside an IT block or a
		   VPT/VPST/IT instruction inside a VPT block is UNPREDICTABLE.
		 */
		if (now_pred.type == SCALAR_PRED)
		  as_tsktsk (MVE_NOT_IT);
		else
		  as_tsktsk (MVE_NOT_VPT);
		return SUCCESS;
	      }
	    else
	      {
		/* VPT/VPST do not accept condition codes.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	  }
      }
      break;
    }

  return SUCCESS;
}
23263
/* Describes one class of 16-bit instructions that is performance
   deprecated inside an IT block: an instruction belongs to the class
   when (insn & mask) == pattern.  */
struct depr_insn_mask
{
  /* Encoding bits the class members share (after masking).  */
  unsigned long pattern;
  /* Bits of the encoding that are significant for the match.  */
  unsigned long mask;
  /* Human-readable class name used in the diagnostic message.  */
  const char* description;
};
23270
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Scanned by it_fsm_post_encode (); an entry matches when
   (insn & mask) == pattern.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode.  'tvalue' is used at the stage this check happens.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }	/* Terminator: mask == 0 ends the scan.  */
};
23285
23286 static void
23287 it_fsm_post_encode (void)
23288 {
23289 int is_last;
23290
23291 if (!now_pred.state_handled)
23292 handle_pred_state ();
23293
23294 if (now_pred.insn_cond
23295 && warn_on_restrict_it
23296 && !now_pred.warn_deprecated
23297 && warn_on_deprecated
23298 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
23299 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
23300 {
23301 if (inst.instruction >= 0x10000)
23302 {
23303 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
23304 "performance deprecated in ARMv8-A and ARMv8-R"));
23305 now_pred.warn_deprecated = TRUE;
23306 }
23307 else
23308 {
23309 const struct depr_insn_mask *p = depr_it_insns;
23310
23311 while (p->mask != 0)
23312 {
23313 if ((inst.instruction & p->mask) == p->pattern)
23314 {
23315 as_tsktsk (_("IT blocks containing 16-bit Thumb "
23316 "instructions of the following class are "
23317 "performance deprecated in ARMv8-A and "
23318 "ARMv8-R: %s"), p->description);
23319 now_pred.warn_deprecated = TRUE;
23320 break;
23321 }
23322
23323 ++p;
23324 }
23325 }
23326
23327 if (now_pred.block_length > 1)
23328 {
23329 as_tsktsk (_("IT blocks containing more than one conditional "
23330 "instruction are performance deprecated in ARMv8-A and "
23331 "ARMv8-R"));
23332 now_pred.warn_deprecated = TRUE;
23333 }
23334 }
23335
23336 is_last = (now_pred.mask == 0x10);
23337 if (is_last)
23338 {
23339 now_pred.state = OUTSIDE_PRED_BLOCK;
23340 now_pred.mask = 0;
23341 }
23342 }
23343
23344 static void
23345 force_automatic_it_block_close (void)
23346 {
23347 if (now_pred.state == AUTOMATIC_PRED_BLOCK)
23348 {
23349 close_automatic_it_block ();
23350 now_pred.state = OUTSIDE_PRED_BLOCK;
23351 now_pred.mask = 0;
23352 }
23353 }
23354
23355 static int
23356 in_pred_block (void)
23357 {
23358 if (!now_pred.state_handled)
23359 handle_pred_state ();
23360
23361 return now_pred.state != OUTSIDE_PRED_BLOCK;
23362 }
23363
23364 /* Whether OPCODE only has T32 encoding. Since this function is only used by
23365 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
23366 here, hence the "known" in the function name. */
23367
23368 static bfd_boolean
23369 known_t32_only_insn (const struct asm_opcode *opcode)
23370 {
23371 /* Original Thumb-1 wide instruction. */
23372 if (opcode->tencode == do_t_blx
23373 || opcode->tencode == do_t_branch23
23374 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
23375 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
23376 return TRUE;
23377
23378 /* Wide-only instruction added to ARMv8-M Baseline. */
23379 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
23380 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
23381 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
23382 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
23383 return TRUE;
23384
23385 return FALSE;
23386 }
23387
23388 /* Whether wide instruction variant can be used if available for a valid OPCODE
23389 in ARCH. */
23390
23391 static bfd_boolean
23392 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
23393 {
23394 if (known_t32_only_insn (opcode))
23395 return TRUE;
23396
23397 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
23398 of variant T3 of B.W is checked in do_t_branch. */
23399 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
23400 && opcode->tencode == do_t_branch)
23401 return TRUE;
23402
23403 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
23404 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
23405 && opcode->tencode == do_t_mov_cmp
23406 /* Make sure CMP instruction is not affected. */
23407 && opcode->aencode == do_mov)
23408 return TRUE;
23409
23410 /* Wide instruction variants of all instructions with narrow *and* wide
23411 variants become available with ARMv6t2. Other opcodes are either
23412 narrow-only or wide-only and are thus available if OPCODE is valid. */
23413 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
23414 return TRUE;
23415
23416 /* OPCODE with narrow only instruction variant or wide variant not
23417 available. */
23418 return FALSE;
23419 }
23420
/* Assemble one source line STR: look up the mnemonic, parse the
   operands, run the Thumb or ARM encoder (driving the IT/VPT FSM
   around it), record which architecture features were used, and emit
   the instruction via output_inst ().  Errors are reported with
   as_bad (); the function simply returns on failure.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Start from a clean instruction record; all relocation slots empty.  */
  memset (&inst, '\0', sizeof (inst));
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    inst.relocs[r].type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicitly require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the pred_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* Encodings 0xe800-0xffff are the first halfwords of 32-bit
	     insns; a complete encoding is either below or above them.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
23615
23616 static void
23617 check_pred_blocks_finished (void)
23618 {
23619 #ifdef OBJ_ELF
23620 asection *sect;
23621
23622 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
23623 if (seg_info (sect)->tc_segment_info_data.current_pred.state
23624 == MANUAL_PRED_BLOCK)
23625 {
23626 if (now_pred.type == SCALAR_PRED)
23627 as_warn (_("section '%s' finished with an open IT block."),
23628 sect->name);
23629 else
23630 as_warn (_("section '%s' finished with an open VPT/VPST block."),
23631 sect->name);
23632 }
23633 #else
23634 if (now_pred.state == MANUAL_PRED_BLOCK)
23635 {
23636 if (now_pred.type == SCALAR_PRED)
23637 as_warn (_("file finished with an open IT block."));
23638 else
23639 as_warn (_("file finished with an open VPT/VPST block."));
23640 }
23641 #endif
23642 }
23643
23644 /* Various frobbings of labels and their addresses. */
23645
/* Hook run at the start of each input line: forget the label recorded
   by arm_frob_label () so that md_assemble () only re-anchors labels
   defined on the same line as the instruction being assembled.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
23651
/* Hook run whenever label SYM is defined.  Records the label for
   md_assemble (), tags it with the current Thumb/interworking state,
   closes any automatic IT block (a branch target must not be inside
   one), and emits DWARF line info for it.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_section_flags (now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
23710
23711 bfd_boolean
23712 arm_data_in_code (void)
23713 {
23714 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
23715 {
23716 *input_line_pointer = '/';
23717 input_line_pointer += 5;
23718 *input_line_pointer = 0;
23719 return TRUE;
23720 }
23721
23722 return FALSE;
23723 }
23724
23725 char *
23726 arm_canonicalize_symbol_name (char * name)
23727 {
23728 int len;
23729
23730 if (thumb_mode && (len = strlen (name)) > 5
23731 && streq (name + len - 5, "/data"))
23732 *(name + len - 5) = 0;
23733
23734 return name;
23735 }
23736 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* Define one register entry: name S, encoding value N, type
   REG_TYPE_##T.  The TRUE marks the entry as built in (not .req).  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* Numbered register: REGNUM(r,5,RN) defines "r5" with value 5.  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* As REGNUM but the value is doubled - used below for Neon Q
   registers, whose numbering is in units of D registers.  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* A full set of sixteen numbered registers with prefix P.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* The high half (16-31) of a thirty-two entry register set.  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* Sixteen registers whose encoding is twice their name's number.  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* Upper- and lowercase LR_<bank>, SP_<bank> and SPSR_<bank> names for
   one banked-register group, with selector base BASE packed into bits
   16 and up alongside flag value 768 (SPSR_BIT marks SPSR entries).  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
23767
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Defining the new Zero register from ARMv8.1-M.  */
  REGDEF(zr,15,ZR),
  REGDEF(ZR,15,ZR),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  The value packs the bank selector into bits
     16 and up; the usr/fiq group uses flag value 512 while the
     SPLRBANK/hyp group uses 768, and SPSR_BIT marks SPSR variants.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS), REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD), REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  Encoded via REGNUM2, i.e. in units of D
     registers (q<n> has value 2*n).  */
  REGSET2(q,NQ), REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
  REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),
  REGDEF(fpscr_nzcvqc,2,VFC), REGDEF(FPSCR_nzcvqc,2,VFC),
  REGDEF(vpr,12,VFC), REGDEF(VPR,12,VFC),
  REGDEF(fpcxt_ns,14,VFC), REGDEF(FPCXT_NS,14,VFC),
  REGDEF(fpcxt_s,15,VFC), REGDEF(FPCXT_S,15,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
  REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
  REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
  REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
#undef REGDEF
#undef REGNUM
#undef REGSET
23894
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Every ordering of the f/s/x/c flag
   letters is listed explicitly (pairs, triples and quadruples), so the
   user may write the flags in any order.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all", PSR_c | PSR_f},
  {"flg", PSR_f},
  {"ctl", PSR_c},

  /* Individual flags.	*/
  {"f", PSR_f},
  {"c", PSR_c},
  {"x", PSR_x},
  {"s", PSR_s},

  /* Combinations of flags.  */
  {"fs", PSR_f | PSR_s},
  {"fx", PSR_f | PSR_x},
  {"fc", PSR_f | PSR_c},
  {"sf", PSR_s | PSR_f},
  {"sx", PSR_s | PSR_x},
  {"sc", PSR_s | PSR_c},
  {"xf", PSR_x | PSR_f},
  {"xs", PSR_x | PSR_s},
  {"xc", PSR_x | PSR_c},
  {"cf", PSR_c | PSR_f},
  {"cs", PSR_c | PSR_s},
  {"cx", PSR_c | PSR_x},
  {"fsx", PSR_f | PSR_s | PSR_x},
  {"fsc", PSR_f | PSR_s | PSR_c},
  {"fxs", PSR_f | PSR_x | PSR_s},
  {"fxc", PSR_f | PSR_x | PSR_c},
  {"fcs", PSR_f | PSR_c | PSR_s},
  {"fcx", PSR_f | PSR_c | PSR_x},
  {"sfx", PSR_s | PSR_f | PSR_x},
  {"sfc", PSR_s | PSR_f | PSR_c},
  {"sxf", PSR_s | PSR_x | PSR_f},
  {"sxc", PSR_s | PSR_x | PSR_c},
  {"scf", PSR_s | PSR_c | PSR_f},
  {"scx", PSR_s | PSR_c | PSR_x},
  {"xfs", PSR_x | PSR_f | PSR_s},
  {"xfc", PSR_x | PSR_f | PSR_c},
  {"xsf", PSR_x | PSR_s | PSR_f},
  {"xsc", PSR_x | PSR_s | PSR_c},
  {"xcf", PSR_x | PSR_c | PSR_f},
  {"xcs", PSR_x | PSR_c | PSR_s},
  {"cfs", PSR_c | PSR_f | PSR_s},
  {"cfx", PSR_c | PSR_f | PSR_x},
  {"csf", PSR_c | PSR_s | PSR_f},
  {"csx", PSR_c | PSR_s | PSR_x},
  {"cxf", PSR_c | PSR_x | PSR_f},
  {"cxs", PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
23973
23974 /* Table of V7M psr names. */
23975 static const struct asm_psr v7m_psrs[] =
23976 {
23977 {"apsr", 0x0 }, {"APSR", 0x0 },
23978 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
23979 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
23980 {"psr", 0x3 }, {"PSR", 0x3 },
23981 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
23982 {"ipsr", 0x5 }, {"IPSR", 0x5 },
23983 {"epsr", 0x6 }, {"EPSR", 0x6 },
23984 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
23985 {"msp", 0x8 }, {"MSP", 0x8 },
23986 {"psp", 0x9 }, {"PSP", 0x9 },
23987 {"msplim", 0xa }, {"MSPLIM", 0xa },
23988 {"psplim", 0xb }, {"PSPLIM", 0xb },
23989 {"primask", 0x10}, {"PRIMASK", 0x10},
23990 {"basepri", 0x11}, {"BASEPRI", 0x11},
23991 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
23992 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
23993 {"control", 0x14}, {"CONTROL", 0x14},
23994 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
23995 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
23996 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
23997 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
23998 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
23999 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
24000 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
24001 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
24002 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
24003 };
24004
/* Table of all shift-in-operand names, in lower- and uppercase.
   "asl" is accepted as a synonym for "lsl" (both map to SHIFT_LSL).  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX },
  { "uxtw", SHIFT_UXTW}, { "UXTW", SHIFT_UXTW}
};
24016
/* Table of all explicit relocation names.  Each operator name is
   listed in both lower- and uppercase.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
  { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
  { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
  { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
  { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
  { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
  { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC },
  { "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC },
  { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "funcdesc", BFD_RELOC_ARM_FUNCDESC },
  { "FUNCDESC", BFD_RELOC_ARM_FUNCDESC },
  { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC }, { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC },
  { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC }, { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
  /* Fixed typo: this entry was previously spelled "GOTTPOFF_FDIC",
     which made the uppercase form of the gottpoff_fdpic operator
     unrecognizable.  */
  { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC }, { "GOTTPOFF_FDPIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
};
#endif
24050
/* Table of all conditional affixes.  The value is the 4-bit condition
   field; synonyms (cs/hs, cc/ul/lo) share a value.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
/* "t"/"e" suffix pseudo-conditions; their values lie outside the
   architectural 4-bit condition range (apparently used for vector
   predication blocks - confirm against the VPT handling code).  */
static const struct asm_cond vconds[] =
{
  {"t", 0xf},
  {"e", 0x10}
};
24075
/* Define a barrier-option name in both lower- (L) and uppercase (U),
   with encoding CODE, gated on the core feature FEAT.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Table of barrier (DSB/DMB-style) option names.  The "ld" variants
   require ARMv8; the rest only require the barrier extension.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
24101
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings, consumed by the insns[]
   entries below.  N.B. In all cases other than OPS0, the trailing
   OP_stop comes from default zero-initialization of the unspecified
   elements of the array.  */
#define OPS0() { OP_stop, }
#define OPS1(a) { OP_##a, }
#define OPS2(a,b) { OP_##a,OP_##b, }
#define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a) { a, }
#define OPS_2(a,b) { a,b, }
#define OPS_3(a,b,c) { a,b,c, }
#define OPS_4(a,b,c,d) { a,b,c,d, }
#define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
24126
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  The entry fields are, in order:
   mnemonic string, operand list, mnemonic tag (OT_*), ARM opcode (or
   T_MNEM/N_MNEM index), Thumb opcode (or index), ARM feature set,
   Thumb feature set, ARM encoder, Thumb encoder, MVE-predicable
   flag.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
/* As TxC3 but the infix form is deprecated (a warning is expected).  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en, 0 }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
24178
/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* As CE but with the mnemonic given as a bare identifier (stringized
   here) and the condition infixed after the third character.  */
#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Thumb-only variants of TCE and TUE.  */
#define ToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

#define ToU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToC.  */
#define toC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToU.  */
#define toU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* mov instructions that are shared between coprocessor and MVE.  */
#define mcCE(mnem,  op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##ae, 0 }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }
24231
/* Build one table entry with the condition M2 infixed between the
   mnemonic parts M1 and M3.  The tag records the offset of the infix
   (OT_odd_infix_unc when M2 is empty).  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Expand to one entry per condition infix (plus the bare form) for a
   mnemonic split as M1<cond>M2.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

/* ARM-only unconditional mnemonic (condition field 0xE).  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* ARM-only unconditional mnemonic bearing 0xF in the condition field.  */
#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
24263
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  TAG selects the mnemonic tag; MVE_P marks the entry as
   MVE-predicable.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag, mve_p)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag, mve_p)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* MVE-predicated insn (mve_p 1) with OT_csuffixF tag, indirected
   through an M_MNEM opcode index.  */
#define mCEF(mnem, op, nops, ops, enc)				\
  { #mnem, OPS##nops ops, OT_csuffixF, M_MNEM##op, M_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }


/* nCEF but for MVE predicated instructions.  */
#define mnCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)

/* nCE but for MVE predicated instructions.  */
#define mnCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NUF but for potentially MVE predicated instructions.  */
#define MNUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* nUF but for potentially MVE predicated instructions.  */
#define mnUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* ToC but for potentially MVE predicated instructions.  */
#define mToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 1 }

/* NCE but for MVE predicated instructions.  */
#define MNCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NCEF but for MVE predicated instructions.  */
#define MNCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)
/* Lets a plain 0 be passed where an encoder name is expected:
   do_##0 then expands to do_0, i.e. a null encoder pointer.  */
#define do_0 0
24337
24338 static const struct asm_opcode insns[] =
24339 {
24340 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
24341 #define THUMB_VARIANT & arm_ext_v4t
24342 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
24343 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
24344 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
24345 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
24346 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
24347 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
24348 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
24349 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
24350 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
24351 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
24352 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
24353 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
24354 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
24355 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
24356 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
24357 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
24358
24359 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
24360 for setting PSR flag bits. They are obsolete in V6 and do not
24361 have Thumb equivalents. */
24362 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
24363 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
24364 CL("tstp", 110f000, 2, (RR, SH), cmp),
24365 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
24366 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
24367 CL("cmpp", 150f000, 2, (RR, SH), cmp),
24368 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
24369 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
24370 CL("cmnp", 170f000, 2, (RR, SH), cmp),
24371
24372 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
24373 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
24374 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
24375 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
24376
24377 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
24378 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
24379 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
24380 OP_RRnpc),
24381 OP_ADDRGLDR),ldst, t_ldst),
24382 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
24383
24384 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24385 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24386 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24387 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24388 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24389 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24390
24391 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
24392 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
24393
24394 /* Pseudo ops. */
24395 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
24396 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
24397 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
24398 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
24399
24400 /* Thumb-compatibility pseudo ops. */
24401 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
24402 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
24403 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
24404 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
24405 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
24406 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
24407 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
24408 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
24409 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
24410 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
24411 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
24412 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
24413
24414 /* These may simplify to neg. */
24415 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
24416 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
24417
24418 #undef THUMB_VARIANT
24419 #define THUMB_VARIANT & arm_ext_os
24420
24421 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
24422 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
24423
24424 #undef THUMB_VARIANT
24425 #define THUMB_VARIANT & arm_ext_v6
24426
24427 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
24428
24429 /* V1 instructions with no Thumb analogue prior to V6T2. */
24430 #undef THUMB_VARIANT
24431 #define THUMB_VARIANT & arm_ext_v6t2
24432
24433 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
24434 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
24435 CL("teqp", 130f000, 2, (RR, SH), cmp),
24436
24437 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
24438 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
24439 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
24440 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
24441
24442 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24443 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24444
24445 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24446 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24447
24448 /* V1 instructions with no Thumb analogue at all. */
24449 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
24450 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
24451
24452 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
24453 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
24454 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
24455 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
24456 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
24457 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
24458 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
24459 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
24460
24461 #undef ARM_VARIANT
24462 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
24463 #undef THUMB_VARIANT
24464 #define THUMB_VARIANT & arm_ext_v4t
24465
24466 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
24467 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
24468
24469 #undef THUMB_VARIANT
24470 #define THUMB_VARIANT & arm_ext_v6t2
24471
24472 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
24473 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
24474
24475 /* Generic coprocessor instructions. */
24476 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
24477 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24478 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24479 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24480 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24481 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
24482 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
24483
24484 #undef ARM_VARIANT
24485 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
24486
24487 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
24488 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
24489
24490 #undef ARM_VARIANT
24491 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
24492 #undef THUMB_VARIANT
24493 #define THUMB_VARIANT & arm_ext_msr
24494
24495 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
24496 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
24497
24498 #undef ARM_VARIANT
24499 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
24500 #undef THUMB_VARIANT
24501 #define THUMB_VARIANT & arm_ext_v6t2
24502
24503 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
24504 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24505 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
24506 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24507 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
24508 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24509 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
24510 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24511
24512 #undef ARM_VARIANT
24513 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
24514 #undef THUMB_VARIANT
24515 #define THUMB_VARIANT & arm_ext_v4t
24516
24517 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24518 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24519 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24520 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24521 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24522 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24523
24524 #undef ARM_VARIANT
24525 #define ARM_VARIANT & arm_ext_v4t_5
24526
24527 /* ARM Architecture 4T. */
24528 /* Note: bx (and blx) are required on V5, even if the processor does
24529 not support Thumb. */
24530 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
24531
24532 #undef ARM_VARIANT
24533 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
24534 #undef THUMB_VARIANT
24535 #define THUMB_VARIANT & arm_ext_v5t
24536
24537 /* Note: blx has 2 variants; the .value coded here is for
24538 BLX(2). Only this variant has conditional execution. */
24539 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
24540 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
24541
24542 #undef THUMB_VARIANT
24543 #define THUMB_VARIANT & arm_ext_v6t2
24544
24545 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
24546 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24547 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24548 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24549 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24550 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
24551 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
24552 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
24553
24554 #undef ARM_VARIANT
24555 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
24556 #undef THUMB_VARIANT
24557 #define THUMB_VARIANT & arm_ext_v5exp
24558
24559 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
24560 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
24561 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
24562 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
24563
24564 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
24565 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
24566
24567 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
24568 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
24569 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
24570 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
24571
24572 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24573 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24574 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24575 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24576
24577 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24578 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24579
24580 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
24581 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
24582 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
24583 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
24584
24585 #undef ARM_VARIANT
24586 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
24587 #undef THUMB_VARIANT
24588 #define THUMB_VARIANT & arm_ext_v6t2
24589
24590 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
24591 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
24592 ldrd, t_ldstd),
24593 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
24594 ADDRGLDRS), ldrd, t_ldstd),
24595
24596 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24597 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24598
24599 #undef ARM_VARIANT
24600 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
24601
24602 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
24603
24604 #undef ARM_VARIANT
24605 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
24606 #undef THUMB_VARIANT
24607 #define THUMB_VARIANT & arm_ext_v6
24608
24609 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
24610 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
24611 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
24612 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
24613 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
24614 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24615 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24616 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24617 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24618 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
24619
24620 #undef THUMB_VARIANT
24621 #define THUMB_VARIANT & arm_ext_v6t2_v8m
24622
24623 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
24624 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
24625 strex, t_strex),
24626 #undef THUMB_VARIANT
24627 #define THUMB_VARIANT & arm_ext_v6t2
24628
24629 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24630 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24631
24632 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
24633 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
24634
24635 /* ARM V6 not included in V7M. */
24636 #undef THUMB_VARIANT
24637 #define THUMB_VARIANT & arm_ext_v6_notm
24638 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
24639 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
24640 UF(rfeib, 9900a00, 1, (RRw), rfe),
24641 UF(rfeda, 8100a00, 1, (RRw), rfe),
24642 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
24643 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
24644 UF(rfefa, 8100a00, 1, (RRw), rfe),
24645 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
24646 UF(rfeed, 9900a00, 1, (RRw), rfe),
24647 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
24648 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
24649 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
24650 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
24651 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
24652 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
24653 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
24654 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
24655 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
24656 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
24657
24658 /* ARM V6 not included in V7M (eg. integer SIMD). */
24659 #undef THUMB_VARIANT
24660 #define THUMB_VARIANT & arm_ext_v6_dsp
24661 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
24662 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
24663 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24664 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24665 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24666 /* Old name for QASX. */
24667 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24668 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24669 /* Old name for QSAX. */
24670 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24671 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24672 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24673 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24674 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24675 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24676 /* Old name for SASX. */
24677 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24678 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24679 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24680 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24681 /* Old name for SHASX. */
24682 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24683 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24684 /* Old name for SHSAX. */
24685 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24686 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24687 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24688 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24689 /* Old name for SSAX. */
24690 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24691 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24692 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24693 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24694 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24695 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24696 /* Old name for UASX. */
24697 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24698 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24699 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24700 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24701 /* Old name for UHASX. */
24702 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24703 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24704 /* Old name for UHSAX. */
24705 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24706 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24707 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24708 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24709 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24710 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24711 /* Old name for UQASX. */
24712 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24713 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24714 /* Old name for UQSAX. */
24715 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24716 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24717 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24718 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24719 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24720 /* Old name for USAX. */
24721 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24722 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24723 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24724 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24725 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24726 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24727 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24728 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24729 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24730 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24731 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24732 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24733 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24734 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24735 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24736 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24737 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24738 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24739 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24740 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24741 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24742 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24743 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24744 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24745 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24746 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24747 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24748 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24749 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24750 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
24751 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
24752 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24753 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24754 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
24755
24756 #undef ARM_VARIANT
24757 #define ARM_VARIANT & arm_ext_v6k_v6t2
24758 #undef THUMB_VARIANT
24759 #define THUMB_VARIANT & arm_ext_v6k_v6t2
24760
24761 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
24762 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
24763 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
24764 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
24765
24766 #undef THUMB_VARIANT
24767 #define THUMB_VARIANT & arm_ext_v6_notm
24768 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
24769 ldrexd, t_ldrexd),
24770 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
24771 RRnpcb), strexd, t_strexd),
24772
24773 #undef THUMB_VARIANT
24774 #define THUMB_VARIANT & arm_ext_v6t2_v8m
24775 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
24776 rd_rn, rd_rn),
24777 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
24778 rd_rn, rd_rn),
24779 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
24780 strex, t_strexbh),
24781 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
24782 strex, t_strexbh),
24783 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
24784
24785 #undef ARM_VARIANT
24786 #define ARM_VARIANT & arm_ext_sec
24787 #undef THUMB_VARIANT
24788 #define THUMB_VARIANT & arm_ext_sec
24789
24790 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
24791
24792 #undef ARM_VARIANT
24793 #define ARM_VARIANT & arm_ext_virt
24794 #undef THUMB_VARIANT
24795 #define THUMB_VARIANT & arm_ext_virt
24796
24797 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
24798 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
24799
24800 #undef ARM_VARIANT
24801 #define ARM_VARIANT & arm_ext_pan
24802 #undef THUMB_VARIANT
24803 #define THUMB_VARIANT & arm_ext_pan
24804
24805 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
24806
24807 #undef ARM_VARIANT
24808 #define ARM_VARIANT & arm_ext_v6t2
24809 #undef THUMB_VARIANT
24810 #define THUMB_VARIANT & arm_ext_v6t2
24811
24812 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
24813 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
24814 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
24815 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
24816
24817 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
24818 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
24819
24820 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24821 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24822 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24823 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24824
24825 #undef ARM_VARIANT
24826 #define ARM_VARIANT & arm_ext_v3
24827 #undef THUMB_VARIANT
24828 #define THUMB_VARIANT & arm_ext_v6t2
24829
24830 TUE("csdb", 320f014, f3af8014, 0, (), noargs, t_csdb),
24831 TUF("ssbb", 57ff040, f3bf8f40, 0, (), noargs, t_csdb),
24832 TUF("pssbb", 57ff044, f3bf8f44, 0, (), noargs, t_csdb),
24833
24834 #undef ARM_VARIANT
24835 #define ARM_VARIANT & arm_ext_v6t2
24836 #undef THUMB_VARIANT
24837 #define THUMB_VARIANT & arm_ext_v6t2_v8m
24838 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
24839 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
24840
24841 /* Thumb-only instructions. */
24842 #undef ARM_VARIANT
24843 #define ARM_VARIANT NULL
24844 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
24845 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
24846
24847 /* ARM does not really have an IT instruction, so always allow it.
24848 The opcode is copied from Thumb in order to allow warnings in
24849 -mimplicit-it=[never | arm] modes. */
24850 #undef ARM_VARIANT
24851 #define ARM_VARIANT & arm_ext_v1
24852 #undef THUMB_VARIANT
24853 #define THUMB_VARIANT & arm_ext_v6t2
24854
24855 TUE("it", bf08, bf08, 1, (COND), it, t_it),
24856 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
24857 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
24858 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
24859 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
24860 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
24861 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
24862 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
24863 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
24864 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
24865 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
24866 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
24867 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
24868 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
24869 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
24870 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
24871 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
24872 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
24873
24874 /* Thumb2 only instructions. */
24875 #undef ARM_VARIANT
24876 #define ARM_VARIANT NULL
24877
24878 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
24879 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
24880 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
24881 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
24882 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
24883 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
24884
24885 /* Hardware division instructions. */
24886 #undef ARM_VARIANT
24887 #define ARM_VARIANT & arm_ext_adiv
24888 #undef THUMB_VARIANT
24889 #define THUMB_VARIANT & arm_ext_div
24890
24891 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
24892 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
24893
24894 /* ARM V6M/V7 instructions. */
24895 #undef ARM_VARIANT
24896 #define ARM_VARIANT & arm_ext_barrier
24897 #undef THUMB_VARIANT
24898 #define THUMB_VARIANT & arm_ext_barrier
24899
24900 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
24901 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
24902 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
24903
24904 /* ARM V7 instructions. */
24905 #undef ARM_VARIANT
24906 #define ARM_VARIANT & arm_ext_v7
24907 #undef THUMB_VARIANT
24908 #define THUMB_VARIANT & arm_ext_v7
24909
24910 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
24911 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
24912
24913 #undef ARM_VARIANT
24914 #define ARM_VARIANT & arm_ext_mp
24915 #undef THUMB_VARIANT
24916 #define THUMB_VARIANT & arm_ext_mp
24917
24918 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
24919
24920 /* AArchv8 instructions. */
24921 #undef ARM_VARIANT
24922 #define ARM_VARIANT & arm_ext_v8
24923
24924 /* Instructions shared between armv8-a and armv8-m. */
24925 #undef THUMB_VARIANT
24926 #define THUMB_VARIANT & arm_ext_atomics
24927
24928 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
24929 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
24930 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
24931 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
24932 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
24933 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
24934 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
24935 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
24936 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
24937 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
24938 stlex, t_stlex),
24939 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
24940 stlex, t_stlex),
24941 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
24942 stlex, t_stlex),
24943 #undef THUMB_VARIANT
24944 #define THUMB_VARIANT & arm_ext_v8
24945
24946 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
24947 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
24948 ldrexd, t_ldrexd),
24949 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
24950 strexd, t_strexd),
24951
24952 /* Defined in V8 but is in undefined encoding space for earlier
24953 architectures. However earlier architectures are required to treat
24954 this instruction as a semihosting trap as well. Hence while not explicitly
24955 defined as such, it is in fact correct to define the instruction for all
24956 architectures. */
24957 #undef THUMB_VARIANT
24958 #define THUMB_VARIANT & arm_ext_v1
24959 #undef ARM_VARIANT
24960 #define ARM_VARIANT & arm_ext_v1
24961 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
24962
24963 /* ARMv8 T32 only. */
24964 #undef ARM_VARIANT
24965 #define ARM_VARIANT NULL
24966 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
24967 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
24968 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
24969
24970 /* FP for ARMv8. */
24971 #undef ARM_VARIANT
24972 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
24973 #undef THUMB_VARIANT
24974 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
24975
24976 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
24977 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
24978 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
24979 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
24980 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
24981 mnCE(vrintz, _vrintr, 2, (RNSDQMQ, oRNSDQMQ), vrintz),
24982 mnCE(vrintx, _vrintr, 2, (RNSDQMQ, oRNSDQMQ), vrintx),
24983 mnUF(vrinta, _vrinta, 2, (RNSDQMQ, oRNSDQMQ), vrinta),
24984 mnUF(vrintn, _vrinta, 2, (RNSDQMQ, oRNSDQMQ), vrintn),
24985 mnUF(vrintp, _vrinta, 2, (RNSDQMQ, oRNSDQMQ), vrintp),
24986 mnUF(vrintm, _vrinta, 2, (RNSDQMQ, oRNSDQMQ), vrintm),
24987
24988 /* Crypto v1 extensions. */
24989 #undef ARM_VARIANT
24990 #define ARM_VARIANT & fpu_crypto_ext_armv8
24991 #undef THUMB_VARIANT
24992 #define THUMB_VARIANT & fpu_crypto_ext_armv8
24993
24994 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
24995 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
24996 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
24997 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
24998 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
24999 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
25000 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
25001 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
25002 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
25003 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
25004 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
25005 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
25006 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
25007 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
25008
25009 #undef ARM_VARIANT
25010 #define ARM_VARIANT & arm_ext_crc
25011 #undef THUMB_VARIANT
25012 #define THUMB_VARIANT & arm_ext_crc
25013 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
25014 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
25015 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
25016 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
25017 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
25018 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
25019
25020 /* ARMv8.2 RAS extension. */
25021 #undef ARM_VARIANT
25022 #define ARM_VARIANT & arm_ext_ras
25023 #undef THUMB_VARIANT
25024 #define THUMB_VARIANT & arm_ext_ras
25025 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
25026
25027 #undef ARM_VARIANT
25028 #define ARM_VARIANT & arm_ext_v8_3
25029 #undef THUMB_VARIANT
25030 #define THUMB_VARIANT & arm_ext_v8_3
25031 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
25032
25033 #undef ARM_VARIANT
25034 #define ARM_VARIANT & fpu_neon_ext_dotprod
25035 #undef THUMB_VARIANT
25036 #define THUMB_VARIANT & fpu_neon_ext_dotprod
25037 NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
25038 NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
25039
25040 #undef ARM_VARIANT
25041 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
25042 #undef THUMB_VARIANT
25043 #define THUMB_VARIANT NULL
25044
25045 cCE("wfs", e200110, 1, (RR), rd),
25046 cCE("rfs", e300110, 1, (RR), rd),
25047 cCE("wfc", e400110, 1, (RR), rd),
25048 cCE("rfc", e500110, 1, (RR), rd),
25049
25050 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
25051 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
25052 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
25053 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
25054
25055 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
25056 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
25057 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
25058 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
25059
25060 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
25061 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
25062 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
25063 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
25064 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
25065 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
25066 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
25067 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
25068 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
25069 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
25070 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
25071 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
25072
25073 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
25074 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
25075 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
25076 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
25077 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
25078 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
25079 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
25080 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
25081 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
25082 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
25083 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
25084 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
25085
25086 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
25087 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
25088 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
25089 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
25090 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
25091 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
25092 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
25093 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
25094 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
25095 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
25096 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
25097 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
25098
25099 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
25100 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
25101 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
25102 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
25103 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
25104 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
25105 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
25106 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
25107 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
25108 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
25109 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
25110 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
25111
25112 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
25113 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
25114 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
25115 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
25116 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
25117 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
25118 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
25119 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
25120 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
25121 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
25122 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
25123 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
25124
25125 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
25126 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
25127 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
25128 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
25129 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
25130 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
25131 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
25132 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
25133 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
25134 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
25135 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
25136 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
25137
25138 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
25139 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
25140 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
25141 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
25142 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
25143 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
25144 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
25145 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
25146 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
25147 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
25148 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
25149 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
25150
25151 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
25152 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
25153 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
25154 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
25155 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
25156 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
25157 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
25158 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
25159 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
25160 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
25161 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
25162 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
25163
25164 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
25165 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
25166 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
25167 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
25168 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
25169 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
25170 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
25171 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
25172 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
25173 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
25174 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
25175 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
25176
25177 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
25178 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
25179 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
25180 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
25181 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
25182 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
25183 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
25184 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
25185 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
25186 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
25187 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
25188 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
25189
25190 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
25191 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
25192 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
25193 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
25194 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
25195 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
25196 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
25197 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
25198 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
25199 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
25200 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
25201 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
25202
25203 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
25204 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
25205 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
25206 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
25207 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
25208 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
25209 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
25210 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
25211 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
25212 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
25213 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
25214 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
25215
25216 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
25217 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
25218 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
25219 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
25220 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
25221 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
25222 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
25223 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
25224 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
25225 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
25226 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
25227 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
25228
25229 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
25230 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
25231 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
25232 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
25233 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
25234 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
25235 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
25236 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
25237 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
25238 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
25239 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
25240 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
25241
25242 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
25243 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
25244 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
25245 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
25246 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
25247 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
25248 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
25249 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
25250 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
25251 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
25252 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
25253 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
25254
25255 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
25256 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
25257 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
25258 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
25259 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
25260 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
25261 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
25262 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
25263 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
25264 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
25265 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
25266 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
25267
25268 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
25269 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
25270 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
25271 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
25272 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
25273 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25274 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25275 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25276 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
25277 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
25278 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
25279 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
25280
25281 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
25282 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
25283 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
25284 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
25285 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
25286 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25287 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25288 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25289 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
25290 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
25291 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
25292 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
25293
25294 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
25295 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
25296 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
25297 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
25298 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
25299 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25300 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25301 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25302 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
25303 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
25304 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
25305 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
25306
25307 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
25308 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
25309 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
25310 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
25311 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
25312 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25313 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25314 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25315 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
25316 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
25317 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
25318 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
25319
25320 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
25321 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
25322 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
25323 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
25324 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
25325 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25326 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25327 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25328 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
25329 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
25330 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
25331 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
25332
25333 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
25334 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
25335 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
25336 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
25337 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
25338 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25339 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25340 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25341 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
25342 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
25343 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
25344 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
25345
25346 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
25347 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
25348 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
25349 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
25350 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
25351 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25352 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25353 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25354 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
25355 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
25356 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
25357 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
25358
25359 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
25360 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
25361 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
25362 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
25363 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
25364 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25365 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25366 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25367 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
25368 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
25369 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
25370 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
25371
25372 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
25373 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
25374 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
25375 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
25376 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
25377 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25378 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25379 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25380 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
25381 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
25382 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
25383 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
25384
25385 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
25386 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
25387 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
25388 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
25389 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
25390 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25391 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25392 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25393 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
25394 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
25395 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
25396 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
25397
25398 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
25399 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
25400 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
25401 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
25402 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
25403 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25404 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25405 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25406 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
25407 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
25408 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
25409 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
25410
25411 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
25412 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
25413 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
25414 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
25415 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
25416 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25417 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25418 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25419 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
25420 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
25421 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
25422 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
25423
25424 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
25425 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
25426 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
25427 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
25428 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
25429 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25430 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25431 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25432 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
25433 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
25434 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
25435 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
25436
25437 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
25438 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
25439 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
25440 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
25441
25442 cCL("flts", e000110, 2, (RF, RR), rn_rd),
25443 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
25444 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
25445 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
25446 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
25447 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
25448 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
25449 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
25450 cCL("flte", e080110, 2, (RF, RR), rn_rd),
25451 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
25452 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
25453 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
25454
25455 /* The implementation of the FIX instruction is broken on some
25456 assemblers, in that it accepts a precision specifier as well as a
25457 rounding specifier, despite the fact that this is meaningless.
25458 To be more compatible, we accept it as well, though of course it
25459 does not set any bits. */
25460 cCE("fix", e100110, 2, (RR, RF), rd_rm),
25461 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
25462 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
25463 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
25464 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
25465 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
25466 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
25467 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
25468 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
25469 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
25470 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
25471 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
25472 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
25473
25474 /* Instructions that were new with the real FPA, call them V2. */
25475 #undef ARM_VARIANT
25476 #define ARM_VARIANT & fpu_fpa_ext_v2
25477
25478 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25479 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25480 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25481 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25482 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25483 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25484
25485 #undef ARM_VARIANT
25486 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
25487 #undef THUMB_VARIANT
25488 #define THUMB_VARIANT & arm_ext_v6t2
25489 mcCE(vmrs, ef00a10, 2, (APSR_RR, RVC), vmrs),
25490 mcCE(vmsr, ee00a10, 2, (RVC, RR), vmsr),
25491 mcCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
25492 mcCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
25493 mcCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
25494 mcCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
25495
25496 /* Memory operations. */
25497 mcCE(fldmias, c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
25498 mcCE(fldmdbs, d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
25499 mcCE(fstmias, c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
25500 mcCE(fstmdbs, d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
25501 #undef THUMB_VARIANT
25502
25503 /* Moves and type conversions. */
25504 cCE("fmstat", ef1fa10, 0, (), noargs),
25505 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
25506 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
25507 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
25508 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
25509 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
25510 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
25511 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
25512 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
25513
25514 /* Memory operations. */
25515 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
25516 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
25517 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
25518 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
25519 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
25520 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
25521 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
25522 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
25523 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
25524 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
25525 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
25526 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
25527
25528 /* Monadic operations. */
25529 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
25530 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
25531 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
25532
25533 /* Dyadic operations. */
25534 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25535 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25536 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25537 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25538 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25539 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25540 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25541 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25542 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25543
25544 /* Comparisons. */
25545 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
25546 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
25547 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
25548 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
25549
25550 /* Double precision load/store are still present on single precision
25551 implementations. */
25552 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
25553 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
25554 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
25555 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
25556 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
25557 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
25558 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
25559 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
25560
25561 #undef ARM_VARIANT
25562 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
25563
25564 /* Moves and type conversions. */
25565 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
25566 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
25567 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
25568 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
25569 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
25570 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
25571 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
25572 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
25573 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
25574 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
25575 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
25576 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
25577
25578 /* Monadic operations. */
25579 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
25580 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
25581 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
25582
25583 /* Dyadic operations. */
25584 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25585 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25586 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25587 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25588 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25589 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25590 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25591 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25592 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25593
25594 /* Comparisons. */
25595 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
25596 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
25597 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
25598 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
25599
25600 /* Instructions which may belong to either the Neon or VFP instruction sets.
25601 Individual encoder functions perform additional architecture checks. */
25602 #undef ARM_VARIANT
25603 #define ARM_VARIANT & fpu_vfp_ext_v1xd
25604 #undef THUMB_VARIANT
25605 #define THUMB_VARIANT & arm_ext_v6t2
25606
25607 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25608 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25609 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25610 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25611 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25612 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25613
25614 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
25615 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
25616
25617 #undef THUMB_VARIANT
25618 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
25619
25620 /* These mnemonics are unique to VFP. */
25621 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
25622 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
25623 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25624 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25625 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25626 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
25627
25628 /* Mnemonics shared by Neon and VFP. */
25629 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
25630
25631 mnCEF(vcvt, _vcvt, 3, (RNSDQMQ, RNSDQMQ, oI32z), neon_cvt),
25632 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
25633 MNCEF(vcvtb, eb20a40, 3, (RVSDMQ, RVSDMQ, oI32b), neon_cvtb),
25634 MNCEF(vcvtt, eb20a40, 3, (RVSDMQ, RVSDMQ, oI32b), neon_cvtt),
25635
25636
25637 /* NOTE: All VMOV encoding is special-cased! */
25638 NCE(vmovq, 0, 1, (VMOV), neon_mov),
25639
25640 #undef THUMB_VARIANT
25641 /* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
25642 by different feature bits. Since we are setting the Thumb guard, we can
25643 require Thumb-1 which makes it a nop guard and set the right feature bit in
25644 do_vldr_vstr (). */
25645 #define THUMB_VARIANT & arm_ext_v4t
25646 NCE(vldr, d100b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
25647 NCE(vstr, d000b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
25648
25649 #undef ARM_VARIANT
25650 #define ARM_VARIANT & arm_ext_fp16
25651 #undef THUMB_VARIANT
25652 #define THUMB_VARIANT & arm_ext_fp16
25653 /* New instructions added from v8.2, allowing the extraction and insertion of
25654 the upper 16 bits of a 32-bit vector register. */
25655 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
25656 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
25657
25658 /* New backported fma/fms instructions optional in v8.2. */
25659 NUF (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
25660 NUF (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
25661
25662 #undef THUMB_VARIANT
25663 #define THUMB_VARIANT & fpu_neon_ext_v1
25664 #undef ARM_VARIANT
25665 #define ARM_VARIANT & fpu_neon_ext_v1
25666
25667 /* Data processing with three registers of the same length. */
25668 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
25669 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
25670 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
25671 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
25672 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
25673 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
25674 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
25675 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
25676 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
25677 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
25678 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
25679 /* If not immediate, fall back to neon_dyadic_i64_su.
25680 shl should accept I8 I16 I32 I64,
25681 qshl should accept S8 S16 S32 S64 U8 U16 U32 U64. */
25682 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl),
25683 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl),
25684 /* Logic ops, types optional & ignored. */
25685 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
25686 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
25687 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
25688 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
25689 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
25690 /* Bitfield ops, untyped. */
25691 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25692 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
25693 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25694 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
25695 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25696 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
25697 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
25698 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
25699 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
25700 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
25701 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
25702 back to neon_dyadic_if_su. */
25703 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
25704 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
25705 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
25706 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
25707 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
25708 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
25709 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
25710 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
25711 /* Comparison. Type I8 I16 I32 F32. */
25712 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
25713 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
25714 /* As above, D registers only. */
25715 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
25716 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
25717 /* Int and float variants, signedness unimportant. */
25718 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
25719 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
25720 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
25721 /* Add/sub take types I8 I16 I32 I64 F32. */
25722 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
25723 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
25724 /* vtst takes sizes 8, 16, 32. */
25725 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
25726 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
25727 /* VMUL takes I8 I16 I32 F32 P8. */
25728 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
25729 /* VQD{R}MULH takes S16 S32. */
25730 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
25731 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
25732 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
25733 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
25734 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
25735 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
25736 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
25737 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
25738 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
25739 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
25740 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
25741 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
25742 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
25743 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
25744 /* ARM v8.1 extension. */
25745 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
25746 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
25747 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
25748
25749 /* Two address, int/float. Types S8 S16 S32 F32. */
25750 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
25751 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
25752
25753 /* Data processing with two registers and a shift amount. */
25754 /* Right shifts, and variants with rounding.
25755 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
25756 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
25757 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
25758 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
25759 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
25760 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
25761 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
25762 /* Shift and insert. Sizes accepted 8 16 32 64. */
25763 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
25764 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
25765 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
25766 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
25767 /* Right shift immediate, saturating & narrowing, with rounding variants.
25768 Types accepted S16 S32 S64 U16 U32 U64. */
25769 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
25770 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
25771 /* As above, unsigned. Types accepted S16 S32 S64. */
25772 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
25773 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
25774 /* Right shift narrowing. Types accepted I16 I32 I64. */
25775 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
25776 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
25777 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
25778 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
25779 /* CVT with optional immediate for fixed-point variant. */
25780 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
25781
25782 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
25783
25784 /* Data processing, three registers of different lengths. */
25785 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
25786 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
25787 /* If not scalar, fall back to neon_dyadic_long.
25788 Vector types as above, scalar types S16 S32 U16 U32. */
25789 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
25790 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
25791 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
25792 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
25793 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
25794 /* Dyadic, narrowing insns. Types I16 I32 I64. */
25795 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
25796 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
25797 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
25798 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
25799 /* Saturating doubling multiplies. Types S16 S32. */
25800 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25801 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25802 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25803 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
25804 S16 S32 U16 U32. */
25805 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
25806
25807 /* Extract. Size 8. */
25808 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
25809 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
25810
25811 /* Two registers, miscellaneous. */
25812 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
25813 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
25814 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
25815 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
25816 /* Vector replicate. Sizes 8 16 32. */
25817 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
25818 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
25819 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
25820 /* VMOVN. Types I16 I32 I64. */
25821 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
25822 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
25823 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
25824 /* VQMOVUN. Types S16 S32 S64. */
25825 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
25826 /* VZIP / VUZP. Sizes 8 16 32. */
25827 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
25828 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
25829 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
25830 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
25831 /* VQABS / VQNEG. Types S8 S16 S32. */
25832 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
25833 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
25834 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
25835 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
25836 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
25837 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
25838 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
25839 /* Reciprocal estimates. Types U32 F16 F32. */
25840 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
25841 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
25842 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
25843 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
25844 /* VCLS. Types S8 S16 S32. */
25845 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
25846 /* VCLZ. Types I8 I16 I32. */
25847 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
25848 /* VCNT. Size 8. */
25849 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
25850 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
25851 /* Two address, untyped. */
25852 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
25853 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
25854 /* VTRN. Sizes 8 16 32. */
25855 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
25856 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
25857
25858 /* Table lookup. Size 8. */
25859 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
25860 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
25861
25862 #undef THUMB_VARIANT
25863 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
25864 #undef ARM_VARIANT
25865 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
25866
25867 /* Neon element/structure load/store. */
25868 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
25869 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
25870 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
25871 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
25872 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
25873 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
25874 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
25875 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
25876
25877 #undef THUMB_VARIANT
25878 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
25879 #undef ARM_VARIANT
25880 #define ARM_VARIANT & fpu_vfp_ext_v3xd
25881 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
25882 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
25883 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
25884 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
25885 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
25886 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
25887 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
25888 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
25889 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
25890
25891 #undef THUMB_VARIANT
25892 #define THUMB_VARIANT & fpu_vfp_ext_v3
25893 #undef ARM_VARIANT
25894 #define ARM_VARIANT & fpu_vfp_ext_v3
25895
25896 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
25897 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
25898 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
25899 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
25900 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
25901 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
25902 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
25903 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
25904 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
25905
25906 #undef ARM_VARIANT
25907 #define ARM_VARIANT & fpu_vfp_ext_fma
25908 #undef THUMB_VARIANT
25909 #define THUMB_VARIANT & fpu_vfp_ext_fma
25910 /* Mnemonics shared by Neon, VFP, MVE and BF16. These are included in the
25911 VFP FMA variant; NEON and VFP FMA always includes the NEON
25912 FMA instructions. */
25913 mnCEF(vfma, _vfma, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_fmac),
25914 TUF ("vfmat", c300850, fc300850, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), mve_vfma, mve_vfma),
25915 mnCEF(vfms, _vfms, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ), neon_fmac),
25916
25917 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
25918 the v form should always be used. */
25919 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25920 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25921 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25922 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25923 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25924 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25925
25926 #undef THUMB_VARIANT
25927 #undef ARM_VARIANT
25928 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
25929
25930 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25931 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25932 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25933 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25934 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25935 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25936 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
25937 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
25938
25939 #undef ARM_VARIANT
25940 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
25941
25942 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
25943 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
25944 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
25945 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
25946 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
25947 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
25948 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
25949 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
25950 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
25951 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
25952 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
25953 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
25954 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
25955 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
25956 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
25957 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
25958 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
25959 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
25960 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
25961 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
25962 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
25963 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
25964 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
25965 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
25966 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
25967 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
25968 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
25969 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
25970 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
25971 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
25972 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
25973 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
25974 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
25975 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
25976 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
25977 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
25978 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
25979 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25980 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25981 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25982 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25983 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25984 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25985 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25986 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25987 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25988 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
25989 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25990 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25991 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25992 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25993 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25994 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25995 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25996 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25997 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25998 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25999 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26000 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26001 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26002 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26003 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26004 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26005 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26006 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26007 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26008 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
26009 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
26010 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
26011 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
26012 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26013 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26014 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26015 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26016 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26017 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26018 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26019 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26020 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26021 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26022 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26023 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26024 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26025 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26026 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26027 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26028 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26029 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26030 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
26031 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26032 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26033 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26034 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26035 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26036 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26037 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26038 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26039 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26040 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26041 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26042 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26043 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26044 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26045 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26046 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26047 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26048 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26049 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26050 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26051 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26052 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
26053 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26054 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26055 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26056 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26057 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26058 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26059 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26060 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26061 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26062 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26063 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26064 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26065 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26066 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26067 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26068 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26069 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26070 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26071 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
26072 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
26073 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
26074 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
26075 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26076 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26077 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26078 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26079 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26080 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26081 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26082 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26083 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26084 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
26085 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
26086 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
26087 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
26088 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
26089 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
26090 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26091 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26092 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26093 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
26094 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
26095 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
26096 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
26097 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
26098 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
26099 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26100 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26101 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26102 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26103 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
26104
26105 #undef ARM_VARIANT
26106 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
26107
26108 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
26109 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
26110 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
26111 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
26112 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
26113 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
26114 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26115 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26116 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26117 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26118 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26119 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26120 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26121 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26122 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26123 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26124 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26125 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26126 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26127 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26128 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
26129 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26130 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26131 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26132 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26133 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26134 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26135 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26136 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26137 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26138 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26139 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26140 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26141 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26142 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26143 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26144 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26145 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26146 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26147 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26148 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26149 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26150 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26151 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26152 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26153 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26154 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26155 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26156 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26157 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26158 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26159 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26160 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26161 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26162 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26163 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26164 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26165
26166 #undef ARM_VARIANT
26167 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
26168
26169 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
26170 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
26171 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
26172 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
26173 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
26174 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
26175 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
26176 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
26177 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
26178 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
26179 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
26180 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
26181 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
26182 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
26183 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
26184 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
26185 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
26186 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
26187 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
26188 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
26189 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
26190 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
26191 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
26192 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
26193 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
26194 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
26195 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
26196 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
26197 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
26198 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
26199 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
26200 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
26201 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
26202 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
26203 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
26204 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
26205 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
26206 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
26207 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
26208 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
26209 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
26210 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
26211 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
26212 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
26213 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
26214 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
26215 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
26216 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
26217 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
26218 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
26219 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
26220 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
26221 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
26222 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
26223 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
26224 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
26225 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
26226 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
26227 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
26228 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
26229 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
26230 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
26231 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
26232 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
26233 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
26234 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
26235 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
26236 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
26237 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
26238 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
26239 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
26240 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
26241 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
26242 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
26243 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
26244 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
26245
26246 /* ARMv8.5-A instructions. */
26247 #undef ARM_VARIANT
26248 #define ARM_VARIANT & arm_ext_sb
26249 #undef THUMB_VARIANT
26250 #define THUMB_VARIANT & arm_ext_sb
26251 TUF("sb", 57ff070, f3bf8f70, 0, (), noargs, noargs),
26252
26253 #undef ARM_VARIANT
26254 #define ARM_VARIANT & arm_ext_predres
26255 #undef THUMB_VARIANT
26256 #define THUMB_VARIANT & arm_ext_predres
26257 CE("cfprctx", e070f93, 1, (RRnpc), rd),
26258 CE("dvprctx", e070fb3, 1, (RRnpc), rd),
26259 CE("cpprctx", e070ff3, 1, (RRnpc), rd),
26260
26261 /* ARMv8-M instructions. */
26262 #undef ARM_VARIANT
26263 #define ARM_VARIANT NULL
26264 #undef THUMB_VARIANT
26265 #define THUMB_VARIANT & arm_ext_v8m
26266 ToU("sg", e97fe97f, 0, (), noargs),
26267 ToC("blxns", 4784, 1, (RRnpc), t_blx),
26268 ToC("bxns", 4704, 1, (RRnpc), t_bx),
26269 ToC("tt", e840f000, 2, (RRnpc, RRnpc), tt),
26270 ToC("ttt", e840f040, 2, (RRnpc, RRnpc), tt),
26271 ToC("tta", e840f080, 2, (RRnpc, RRnpc), tt),
26272 ToC("ttat", e840f0c0, 2, (RRnpc, RRnpc), tt),
26273
26274 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
26275 instructions behave as nop if no VFP is present. */
26276 #undef THUMB_VARIANT
26277 #define THUMB_VARIANT & arm_ext_v8m_main
26278 ToC("vlldm", ec300a00, 1, (RRnpc), rn),
26279 ToC("vlstm", ec200a00, 1, (RRnpc), rn),
26280
26281 /* Armv8.1-M Mainline instructions. */
26282 #undef THUMB_VARIANT
26283 #define THUMB_VARIANT & arm_ext_v8_1m_main
26284 toU("cinc", _cinc, 3, (RRnpcsp, RR_ZR, COND), t_cond),
26285 toU("cinv", _cinv, 3, (RRnpcsp, RR_ZR, COND), t_cond),
26286 toU("cneg", _cneg, 3, (RRnpcsp, RR_ZR, COND), t_cond),
26287 toU("csel", _csel, 4, (RRnpcsp, RR_ZR, RR_ZR, COND), t_cond),
26288 toU("csetm", _csetm, 2, (RRnpcsp, COND), t_cond),
26289 toU("cset", _cset, 2, (RRnpcsp, COND), t_cond),
26290 toU("csinc", _csinc, 4, (RRnpcsp, RR_ZR, RR_ZR, COND), t_cond),
26291 toU("csinv", _csinv, 4, (RRnpcsp, RR_ZR, RR_ZR, COND), t_cond),
26292 toU("csneg", _csneg, 4, (RRnpcsp, RR_ZR, RR_ZR, COND), t_cond),
26293
26294 toC("bf", _bf, 2, (EXPs, EXPs), t_branch_future),
26295 toU("bfcsel", _bfcsel, 4, (EXPs, EXPs, EXPs, COND), t_branch_future),
26296 toC("bfx", _bfx, 2, (EXPs, RRnpcsp), t_branch_future),
26297 toC("bfl", _bfl, 2, (EXPs, EXPs), t_branch_future),
26298 toC("bflx", _bflx, 2, (EXPs, RRnpcsp), t_branch_future),
26299
26300 toU("dls", _dls, 2, (LR, RRnpcsp), t_loloop),
26301 toU("wls", _wls, 3, (LR, RRnpcsp, EXP), t_loloop),
26302 toU("le", _le, 2, (oLR, EXP), t_loloop),
26303
26304 ToC("clrm", e89f0000, 1, (CLRMLST), t_clrm),
26305 ToC("vscclrm", ec9f0a00, 1, (VRSDVLST), t_vscclrm),
26306
26307 #undef THUMB_VARIANT
26308 #define THUMB_VARIANT & mve_ext
26309 ToC("lsll", ea50010d, 3, (RRe, RRo, RRnpcsp_I32), mve_scalar_shift),
26310 ToC("lsrl", ea50011f, 3, (RRe, RRo, I32), mve_scalar_shift),
26311 ToC("asrl", ea50012d, 3, (RRe, RRo, RRnpcsp_I32), mve_scalar_shift),
26312 ToC("uqrshll", ea51010d, 4, (RRe, RRo, I48_I64, RRnpcsp), mve_scalar_shift1),
26313 ToC("sqrshrl", ea51012d, 4, (RRe, RRo, I48_I64, RRnpcsp), mve_scalar_shift1),
26314 ToC("uqshll", ea51010f, 3, (RRe, RRo, I32), mve_scalar_shift),
26315 ToC("urshrl", ea51011f, 3, (RRe, RRo, I32), mve_scalar_shift),
26316 ToC("srshrl", ea51012f, 3, (RRe, RRo, I32), mve_scalar_shift),
26317 ToC("sqshll", ea51013f, 3, (RRe, RRo, I32), mve_scalar_shift),
26318 ToC("uqrshl", ea500f0d, 2, (RRnpcsp, RRnpcsp), mve_scalar_shift),
26319 ToC("sqrshr", ea500f2d, 2, (RRnpcsp, RRnpcsp), mve_scalar_shift),
26320 ToC("uqshl", ea500f0f, 2, (RRnpcsp, I32), mve_scalar_shift),
26321 ToC("urshr", ea500f1f, 2, (RRnpcsp, I32), mve_scalar_shift),
26322 ToC("srshr", ea500f2f, 2, (RRnpcsp, I32), mve_scalar_shift),
26323 ToC("sqshl", ea500f3f, 2, (RRnpcsp, I32), mve_scalar_shift),
26324
26325 ToC("vpt", ee410f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26326 ToC("vptt", ee018f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26327 ToC("vpte", ee418f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26328 ToC("vpttt", ee014f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26329 ToC("vptte", ee01cf00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26330 ToC("vptet", ee41cf00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26331 ToC("vptee", ee414f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26332 ToC("vptttt", ee012f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26333 ToC("vpttte", ee016f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26334 ToC("vpttet", ee01ef00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26335 ToC("vpttee", ee01af00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26336 ToC("vptett", ee41af00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26337 ToC("vptete", ee41ef00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26338 ToC("vpteet", ee416f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26339 ToC("vpteee", ee412f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26340
26341 ToC("vpst", fe710f4d, 0, (), mve_vpt),
26342 ToC("vpstt", fe318f4d, 0, (), mve_vpt),
26343 ToC("vpste", fe718f4d, 0, (), mve_vpt),
26344 ToC("vpsttt", fe314f4d, 0, (), mve_vpt),
26345 ToC("vpstte", fe31cf4d, 0, (), mve_vpt),
26346 ToC("vpstet", fe71cf4d, 0, (), mve_vpt),
26347 ToC("vpstee", fe714f4d, 0, (), mve_vpt),
26348 ToC("vpstttt", fe312f4d, 0, (), mve_vpt),
26349 ToC("vpsttte", fe316f4d, 0, (), mve_vpt),
26350 ToC("vpsttet", fe31ef4d, 0, (), mve_vpt),
26351 ToC("vpsttee", fe31af4d, 0, (), mve_vpt),
26352 ToC("vpstett", fe71af4d, 0, (), mve_vpt),
26353 ToC("vpstete", fe71ef4d, 0, (), mve_vpt),
26354 ToC("vpsteet", fe716f4d, 0, (), mve_vpt),
26355 ToC("vpsteee", fe712f4d, 0, (), mve_vpt),
26356
26357 /* MVE and MVE FP only. */
26358 mToC("vhcadd", ee000f00, 4, (RMQ, RMQ, RMQ, EXPi), mve_vhcadd),
26359 mCEF(vctp, _vctp, 1, (RRnpc), mve_vctp),
26360 mCEF(vadc, _vadc, 3, (RMQ, RMQ, RMQ), mve_vadc),
26361 mCEF(vadci, _vadci, 3, (RMQ, RMQ, RMQ), mve_vadc),
26362 mToC("vsbc", fe300f00, 3, (RMQ, RMQ, RMQ), mve_vsbc),
26363 mToC("vsbci", fe301f00, 3, (RMQ, RMQ, RMQ), mve_vsbc),
26364 mCEF(vmullb, _vmullb, 3, (RMQ, RMQ, RMQ), mve_vmull),
26365 mCEF(vabav, _vabav, 3, (RRnpcsp, RMQ, RMQ), mve_vabav),
26366 mCEF(vmladav, _vmladav, 3, (RRe, RMQ, RMQ), mve_vmladav),
26367 mCEF(vmladava, _vmladava, 3, (RRe, RMQ, RMQ), mve_vmladav),
26368 mCEF(vmladavx, _vmladavx, 3, (RRe, RMQ, RMQ), mve_vmladav),
26369 mCEF(vmladavax, _vmladavax, 3, (RRe, RMQ, RMQ), mve_vmladav),
26370 mCEF(vmlav, _vmladav, 3, (RRe, RMQ, RMQ), mve_vmladav),
26371 mCEF(vmlava, _vmladava, 3, (RRe, RMQ, RMQ), mve_vmladav),
26372 mCEF(vmlsdav, _vmlsdav, 3, (RRe, RMQ, RMQ), mve_vmladav),
26373 mCEF(vmlsdava, _vmlsdava, 3, (RRe, RMQ, RMQ), mve_vmladav),
26374 mCEF(vmlsdavx, _vmlsdavx, 3, (RRe, RMQ, RMQ), mve_vmladav),
26375 mCEF(vmlsdavax, _vmlsdavax, 3, (RRe, RMQ, RMQ), mve_vmladav),
26376
26377 mCEF(vst20, _vst20, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
26378 mCEF(vst21, _vst21, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
26379 mCEF(vst40, _vst40, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
26380 mCEF(vst41, _vst41, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
26381 mCEF(vst42, _vst42, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
26382 mCEF(vst43, _vst43, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
26383 mCEF(vld20, _vld20, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
26384 mCEF(vld21, _vld21, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
26385 mCEF(vld40, _vld40, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
26386 mCEF(vld41, _vld41, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
26387 mCEF(vld42, _vld42, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
26388 mCEF(vld43, _vld43, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
26389 mCEF(vstrb, _vstrb, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
26390 mCEF(vstrh, _vstrh, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
26391 mCEF(vstrw, _vstrw, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
26392 mCEF(vstrd, _vstrd, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
26393 mCEF(vldrb, _vldrb, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
26394 mCEF(vldrh, _vldrh, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
26395 mCEF(vldrw, _vldrw, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
26396 mCEF(vldrd, _vldrd, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
26397
26398 mCEF(vmovnt, _vmovnt, 2, (RMQ, RMQ), mve_movn),
26399 mCEF(vmovnb, _vmovnb, 2, (RMQ, RMQ), mve_movn),
26400 mCEF(vbrsr, _vbrsr, 3, (RMQ, RMQ, RR), mve_vbrsr),
26401 mCEF(vaddlv, _vaddlv, 3, (RRe, RRo, RMQ), mve_vaddlv),
26402 mCEF(vaddlva, _vaddlva, 3, (RRe, RRo, RMQ), mve_vaddlv),
26403 mCEF(vaddv, _vaddv, 2, (RRe, RMQ), mve_vaddv),
26404 mCEF(vaddva, _vaddva, 2, (RRe, RMQ), mve_vaddv),
26405 mCEF(vddup, _vddup, 3, (RMQ, RRe, EXPi), mve_viddup),
26406 mCEF(vdwdup, _vdwdup, 4, (RMQ, RRe, RR, EXPi), mve_viddup),
26407 mCEF(vidup, _vidup, 3, (RMQ, RRe, EXPi), mve_viddup),
26408 mCEF(viwdup, _viwdup, 4, (RMQ, RRe, RR, EXPi), mve_viddup),
26409 mToC("vmaxa", ee330e81, 2, (RMQ, RMQ), mve_vmaxa_vmina),
26410 mToC("vmina", ee331e81, 2, (RMQ, RMQ), mve_vmaxa_vmina),
26411 mCEF(vmaxv, _vmaxv, 2, (RR, RMQ), mve_vmaxv),
26412 mCEF(vmaxav, _vmaxav, 2, (RR, RMQ), mve_vmaxv),
26413 mCEF(vminv, _vminv, 2, (RR, RMQ), mve_vmaxv),
26414 mCEF(vminav, _vminav, 2, (RR, RMQ), mve_vmaxv),
26415
26416 mCEF(vmlaldav, _vmlaldav, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26417 mCEF(vmlaldava, _vmlaldava, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26418 mCEF(vmlaldavx, _vmlaldavx, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26419 mCEF(vmlaldavax, _vmlaldavax, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26420 mCEF(vmlalv, _vmlaldav, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26421 mCEF(vmlalva, _vmlaldava, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26422 mCEF(vmlsldav, _vmlsldav, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26423 mCEF(vmlsldava, _vmlsldava, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26424 mCEF(vmlsldavx, _vmlsldavx, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26425 mCEF(vmlsldavax, _vmlsldavax, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26426 mToC("vrmlaldavh", ee800f00, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26427 mToC("vrmlaldavha",ee800f20, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26428 mCEF(vrmlaldavhx, _vrmlaldavhx, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26429 mCEF(vrmlaldavhax, _vrmlaldavhax, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26430 mToC("vrmlalvh", ee800f00, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26431 mToC("vrmlalvha", ee800f20, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26432 mCEF(vrmlsldavh, _vrmlsldavh, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26433 mCEF(vrmlsldavha, _vrmlsldavha, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26434 mCEF(vrmlsldavhx, _vrmlsldavhx, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26435 mCEF(vrmlsldavhax, _vrmlsldavhax, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26436
26437 mToC("vmlas", ee011e40, 3, (RMQ, RMQ, RR), mve_vmlas),
26438 mToC("vmulh", ee010e01, 3, (RMQ, RMQ, RMQ), mve_vmulh),
26439 mToC("vrmulh", ee011e01, 3, (RMQ, RMQ, RMQ), mve_vmulh),
26440 mToC("vpnot", fe310f4d, 0, (), mve_vpnot),
26441 mToC("vpsel", fe310f01, 3, (RMQ, RMQ, RMQ), mve_vpsel),
26442
26443 mToC("vqdmladh", ee000e00, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
26444 mToC("vqdmladhx", ee001e00, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
26445 mToC("vqrdmladh", ee000e01, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
26446 mToC("vqrdmladhx",ee001e01, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
26447 mToC("vqdmlsdh", fe000e00, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
26448 mToC("vqdmlsdhx", fe001e00, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
26449 mToC("vqrdmlsdh", fe000e01, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
26450 mToC("vqrdmlsdhx",fe001e01, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
26451 mToC("vqdmlah", ee000e60, 3, (RMQ, RMQ, RR), mve_vqdmlah),
26452 mToC("vqdmlash", ee001e60, 3, (RMQ, RMQ, RR), mve_vqdmlah),
26453 mToC("vqrdmlash", ee001e40, 3, (RMQ, RMQ, RR), mve_vqdmlah),
26454 mToC("vqdmullt", ee301f00, 3, (RMQ, RMQ, RMQRR), mve_vqdmull),
26455 mToC("vqdmullb", ee300f00, 3, (RMQ, RMQ, RMQRR), mve_vqdmull),
26456 mCEF(vqmovnt, _vqmovnt, 2, (RMQ, RMQ), mve_vqmovn),
26457 mCEF(vqmovnb, _vqmovnb, 2, (RMQ, RMQ), mve_vqmovn),
26458 mCEF(vqmovunt, _vqmovunt, 2, (RMQ, RMQ), mve_vqmovn),
26459 mCEF(vqmovunb, _vqmovunb, 2, (RMQ, RMQ), mve_vqmovn),
26460
26461 mCEF(vshrnt, _vshrnt, 3, (RMQ, RMQ, I32z), mve_vshrn),
26462 mCEF(vshrnb, _vshrnb, 3, (RMQ, RMQ, I32z), mve_vshrn),
26463 mCEF(vrshrnt, _vrshrnt, 3, (RMQ, RMQ, I32z), mve_vshrn),
26464 mCEF(vrshrnb, _vrshrnb, 3, (RMQ, RMQ, I32z), mve_vshrn),
26465 mCEF(vqshrnt, _vqrshrnt, 3, (RMQ, RMQ, I32z), mve_vshrn),
26466 mCEF(vqshrnb, _vqrshrnb, 3, (RMQ, RMQ, I32z), mve_vshrn),
26467 mCEF(vqshrunt, _vqrshrunt, 3, (RMQ, RMQ, I32z), mve_vshrn),
26468 mCEF(vqshrunb, _vqrshrunb, 3, (RMQ, RMQ, I32z), mve_vshrn),
26469 mCEF(vqrshrnt, _vqrshrnt, 3, (RMQ, RMQ, I32z), mve_vshrn),
26470 mCEF(vqrshrnb, _vqrshrnb, 3, (RMQ, RMQ, I32z), mve_vshrn),
26471 mCEF(vqrshrunt, _vqrshrunt, 3, (RMQ, RMQ, I32z), mve_vshrn),
26472 mCEF(vqrshrunb, _vqrshrunb, 3, (RMQ, RMQ, I32z), mve_vshrn),
26473
26474 mToC("vshlc", eea00fc0, 3, (RMQ, RR, I32z), mve_vshlc),
26475 mToC("vshllt", ee201e00, 3, (RMQ, RMQ, I32), mve_vshll),
26476 mToC("vshllb", ee200e00, 3, (RMQ, RMQ, I32), mve_vshll),
26477
26478 toU("dlstp", _dlstp, 2, (LR, RR), t_loloop),
26479 toU("wlstp", _wlstp, 3, (LR, RR, EXP), t_loloop),
26480 toU("letp", _letp, 2, (LR, EXP), t_loloop),
26481 toU("lctp", _lctp, 0, (), t_loloop),
26482
26483 #undef THUMB_VARIANT
26484 #define THUMB_VARIANT & mve_fp_ext
26485 mToC("vcmul", ee300e00, 4, (RMQ, RMQ, RMQ, EXPi), mve_vcmul),
26486 mToC("vfmas", ee311e40, 3, (RMQ, RMQ, RR), mve_vfmas),
26487 mToC("vmaxnma", ee3f0e81, 2, (RMQ, RMQ), mve_vmaxnma_vminnma),
26488 mToC("vminnma", ee3f1e81, 2, (RMQ, RMQ), mve_vmaxnma_vminnma),
26489 mToC("vmaxnmv", eeee0f00, 2, (RR, RMQ), mve_vmaxnmv),
26490 mToC("vmaxnmav",eeec0f00, 2, (RR, RMQ), mve_vmaxnmv),
26491 mToC("vminnmv", eeee0f80, 2, (RR, RMQ), mve_vmaxnmv),
26492 mToC("vminnmav",eeec0f80, 2, (RR, RMQ), mve_vmaxnmv),
26493
26494 #undef ARM_VARIANT
26495 #define ARM_VARIANT & fpu_vfp_ext_v1
26496 #undef THUMB_VARIANT
26497 #define THUMB_VARIANT & arm_ext_v6t2
26498 mnCEF(vmla, _vmla, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), neon_mac_maybe_scalar),
26499 mnCEF(vmul, _vmul, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), neon_mul),
26500
26501 mcCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
26502
26503 #undef ARM_VARIANT
26504 #define ARM_VARIANT & fpu_vfp_ext_v1xd
26505
26506 MNCE(vmov, 0, 1, (VMOV), neon_mov),
26507 mcCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
26508 mcCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
26509 mcCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
26510
26511 mCEF(vmullt, _vmullt, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ), mve_vmull),
26512 mnCEF(vadd, _vadd, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_addsub_if_i),
26513 mnCEF(vsub, _vsub, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_addsub_if_i),
26514
26515 MNCEF(vabs, 1b10300, 2, (RNSDQMQ, RNSDQMQ), neon_abs_neg),
26516 MNCEF(vneg, 1b10380, 2, (RNSDQMQ, RNSDQMQ), neon_abs_neg),
26517
26518 mCEF(vmovlt, _vmovlt, 1, (VMOV), mve_movl),
26519 mCEF(vmovlb, _vmovlb, 1, (VMOV), mve_movl),
26520
26521 mnCE(vcmp, _vcmp, 3, (RVSD_COND, RSVDMQ_FI0, oRMQRZ), vfp_nsyn_cmp),
26522 mnCE(vcmpe, _vcmpe, 3, (RVSD_COND, RSVDMQ_FI0, oRMQRZ), vfp_nsyn_cmp),
26523
26524 #undef ARM_VARIANT
26525 #define ARM_VARIANT & fpu_vfp_ext_v2
26526
26527 mcCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
26528 mcCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
26529 mcCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
26530 mcCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
26531
26532 #undef ARM_VARIANT
26533 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
26534 mnUF(vcvta, _vcvta, 2, (RNSDQMQ, oRNSDQMQ), neon_cvta),
26535 mnUF(vcvtp, _vcvta, 2, (RNSDQMQ, oRNSDQMQ), neon_cvtp),
26536 mnUF(vcvtn, _vcvta, 3, (RNSDQMQ, oRNSDQMQ, oI32z), neon_cvtn),
26537 mnUF(vcvtm, _vcvta, 2, (RNSDQMQ, oRNSDQMQ), neon_cvtm),
26538 mnUF(vmaxnm, _vmaxnm, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ), vmaxnm),
26539 mnUF(vminnm, _vminnm, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ), vmaxnm),
26540
26541 #undef ARM_VARIANT
26542 #define ARM_VARIANT & fpu_neon_ext_v1
26543 mnUF(vabd, _vabd, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
26544 mnUF(vabdl, _vabdl, 3, (RNQMQ, RNDMQ, RNDMQ), neon_dyadic_long),
26545 mnUF(vaddl, _vaddl, 3, (RNQMQ, RNDMQ, RNDMQR), neon_dyadic_long),
26546 mnUF(vsubl, _vsubl, 3, (RNQMQ, RNDMQ, RNDMQR), neon_dyadic_long),
26547 mnUF(vand, _vand, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26548 mnUF(vbic, _vbic, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26549 mnUF(vorr, _vorr, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26550 mnUF(vorn, _vorn, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26551 mnUF(veor, _veor, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_logic),
26552 MNUF(vcls, 1b00400, 2, (RNDQMQ, RNDQMQ), neon_cls),
26553 MNUF(vclz, 1b00480, 2, (RNDQMQ, RNDQMQ), neon_clz),
26554 mnCE(vdup, _vdup, 2, (RNDQMQ, RR_RNSC), neon_dup),
26555 MNUF(vhadd, 00000000, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i_su),
26556 MNUF(vrhadd, 00000100, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_i_su),
26557 MNUF(vhsub, 00000200, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i_su),
26558 mnUF(vmin, _vmin, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
26559 mnUF(vmax, _vmax, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
26560 MNUF(vqadd, 0000010, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i64_su),
26561 MNUF(vqsub, 0000210, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i64_su),
26562 mnUF(vmvn, _vmvn, 2, (RNDQMQ, RNDQMQ_Ibig), neon_mvn),
26563 MNUF(vqabs, 1b00700, 2, (RNDQMQ, RNDQMQ), neon_sat_abs_neg),
26564 MNUF(vqneg, 1b00780, 2, (RNDQMQ, RNDQMQ), neon_sat_abs_neg),
26565 mnUF(vqrdmlah, _vqrdmlah,3, (RNDQMQ, oRNDQMQ, RNDQ_RNSC_RR), neon_qrdmlah),
26566 mnUF(vqdmulh, _vqdmulh, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_RNSC_RR), neon_qdmulh),
26567 mnUF(vqrdmulh, _vqrdmulh,3, (RNDQMQ, oRNDQMQ, RNDQMQ_RNSC_RR), neon_qdmulh),
26568 MNUF(vqrshl, 0000510, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_rshl),
26569 MNUF(vrshl, 0000500, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_rshl),
26570 MNUF(vshr, 0800010, 3, (RNDQMQ, oRNDQMQ, I64z), neon_rshift_round_imm),
26571 MNUF(vrshr, 0800210, 3, (RNDQMQ, oRNDQMQ, I64z), neon_rshift_round_imm),
26572 MNUF(vsli, 1800510, 3, (RNDQMQ, oRNDQMQ, I63), neon_sli),
26573 MNUF(vsri, 1800410, 3, (RNDQMQ, oRNDQMQ, I64z), neon_sri),
26574 MNUF(vrev64, 1b00000, 2, (RNDQMQ, RNDQMQ), neon_rev),
26575 MNUF(vrev32, 1b00080, 2, (RNDQMQ, RNDQMQ), neon_rev),
26576 MNUF(vrev16, 1b00100, 2, (RNDQMQ, RNDQMQ), neon_rev),
26577 mnUF(vshl, _vshl, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_I63b_RR), neon_shl),
26578 mnUF(vqshl, _vqshl, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_I63b_RR), neon_qshl),
26579 MNUF(vqshlu, 1800610, 3, (RNDQMQ, oRNDQMQ, I63), neon_qshlu_imm),
26580
26581 #undef ARM_VARIANT
26582 #define ARM_VARIANT & arm_ext_v8_3
26583 #undef THUMB_VARIANT
26584 #define THUMB_VARIANT & arm_ext_v6t2_v8m
26585 MNUF (vcadd, 0, 4, (RNDQMQ, RNDQMQ, RNDQMQ, EXPi), vcadd),
26586 MNUF (vcmla, 0, 4, (RNDQMQ, RNDQMQ, RNDQMQ_RNSC, EXPi), vcmla),
26587
26588 #undef ARM_VARIANT
26589 #define ARM_VARIANT &arm_ext_bf16
26590 #undef THUMB_VARIANT
26591 #define THUMB_VARIANT &arm_ext_bf16
26592 TUF ("vdot", c000d00, fc000d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), vdot, vdot),
26593 TUF ("vmmla", c000c40, fc000c40, 3, (RNQ, RNQ, RNQ), vmmla, vmmla),
26594 TUF ("vfmab", c300810, fc300810, 3, (RNDQ, RNDQ, RNDQ_RNSC), bfloat_vfma, bfloat_vfma),
26595
26596 #undef ARM_VARIANT
26597 #define ARM_VARIANT &arm_ext_i8mm
26598 #undef THUMB_VARIANT
26599 #define THUMB_VARIANT &arm_ext_i8mm
26600 TUF ("vsmmla", c200c40, fc200c40, 3, (RNQ, RNQ, RNQ), vsmmla, vsmmla),
26601 TUF ("vummla", c200c50, fc200c50, 3, (RNQ, RNQ, RNQ), vummla, vummla),
26602 TUF ("vusmmla", ca00c40, fca00c40, 3, (RNQ, RNQ, RNQ), vsmmla, vsmmla),
26603 TUF ("vusdot", c800d00, fc800d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), vusdot, vusdot),
26604 TUF ("vsudot", c800d10, fc800d10, 3, (RNDQ, RNDQ, RNSC), vsudot, vsudot),
26605
26606 #undef ARM_VARIANT
26607 #undef THUMB_VARIANT
26608 #define THUMB_VARIANT &arm_ext_cde
26609 ToC ("cx1", ee000000, 3, (RCP, APSR_RR, I8191), cx1),
26610 ToC ("cx1a", fe000000, 3, (RCP, APSR_RR, I8191), cx1a),
26611 ToC ("cx1d", ee000040, 4, (RCP, RR, APSR_RR, I8191), cx1d),
26612 ToC ("cx1da", fe000040, 4, (RCP, RR, APSR_RR, I8191), cx1da),
26613
26614 ToC ("cx2", ee400000, 4, (RCP, APSR_RR, APSR_RR, I511), cx2),
26615 ToC ("cx2a", fe400000, 4, (RCP, APSR_RR, APSR_RR, I511), cx2a),
26616 ToC ("cx2d", ee400040, 5, (RCP, RR, APSR_RR, APSR_RR, I511), cx2d),
26617 ToC ("cx2da", fe400040, 5, (RCP, RR, APSR_RR, APSR_RR, I511), cx2da),
26618
26619 ToC ("cx3", ee800000, 5, (RCP, APSR_RR, APSR_RR, APSR_RR, I63), cx3),
26620 ToC ("cx3a", fe800000, 5, (RCP, APSR_RR, APSR_RR, APSR_RR, I63), cx3a),
26621 ToC ("cx3d", ee800040, 6, (RCP, RR, APSR_RR, APSR_RR, APSR_RR, I63), cx3d),
26622 ToC ("cx3da", fe800040, 6, (RCP, RR, APSR_RR, APSR_RR, APSR_RR, I63), cx3da),
26623
26624 mToC ("vcx1", ec200000, 3, (RCP, RNSDMQ, I4095), vcx1),
26625 mToC ("vcx1a", fc200000, 3, (RCP, RNSDMQ, I4095), vcx1),
26626
26627 mToC ("vcx2", ec300000, 4, (RCP, RNSDMQ, RNSDMQ, I127), vcx2),
26628 mToC ("vcx2a", fc300000, 4, (RCP, RNSDMQ, RNSDMQ, I127), vcx2),
26629
26630 mToC ("vcx3", ec800000, 5, (RCP, RNSDMQ, RNSDMQ, RNSDMQ, I15), vcx3),
26631 mToC ("vcx3a", fc800000, 5, (RCP, RNSDMQ, RNSDMQ, RNSDMQ, I15), vcx3),
26632 };
26633
26634 #undef ARM_VARIANT
26635 #undef THUMB_VARIANT
26636 #undef TCE
26637 #undef TUE
26638 #undef TUF
26639 #undef TCC
26640 #undef cCE
26641 #undef cCL
26642 #undef C3E
26643 #undef C3
26644 #undef CE
26645 #undef CM
26646 #undef CL
26647 #undef UE
26648 #undef UF
26649 #undef UT
26650 #undef NUF
26651 #undef nUF
26652 #undef NCE
26653 #undef nCE
26654 #undef OPS0
26655 #undef OPS1
26656 #undef OPS2
26657 #undef OPS3
26658 #undef OPS4
26659 #undef OPS5
26660 #undef OPS6
26661 #undef do_0
26662 #undef ToC
26663 #undef toC
26664 #undef ToU
26665 #undef toU
26666 \f
26667 /* MD interface: bits in the object file. */
26668
26669 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
26670 for use in the a.out file, and stores them in the array pointed to by buf.
26671 This knows about the endian-ness of the target machine and does
26672 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
26673 2 (short) and 4 (long) Floating numbers are put out as a series of
26674 LITTLENUMS (shorts, here at least). */
26675
26676 void
26677 md_number_to_chars (char * buf, valueT val, int n)
26678 {
26679 if (target_big_endian)
26680 number_to_chars_bigendian (buf, val, n);
26681 else
26682 number_to_chars_littleendian (buf, val, n);
26683 }
26684
26685 static valueT
26686 md_chars_to_number (char * buf, int n)
26687 {
26688 valueT result = 0;
26689 unsigned char * where = (unsigned char *) buf;
26690
26691 if (target_big_endian)
26692 {
26693 while (n--)
26694 {
26695 result <<= 8;
26696 result |= (*where++ & 255);
26697 }
26698 }
26699 else
26700 {
26701 while (n--)
26702 {
26703 result <<= 8;
26704 result |= (where[n] & 255);
26705 }
26706 }
26707
26708 return result;
26709 }
26710
26711 /* MD interface: Sections. */
26712
/* Calculate the maximum variable size (i.e., excluding fr_fix)
   that an rs_machine_dependent frag may reach.  */

unsigned int
arm_frag_max_var (fragS *fragp)
{
  /* We only use rs_machine_dependent for variable-size Thumb instructions,
     which are either THUMB_SIZE (2) or INSN_SIZE (4).

     Note that we generate relaxable instructions even for cases that don't
     really need it, like an immediate that's a trivial constant.  So we're
     overestimating the instruction size for some of those cases.  Rather
     than putting more intelligence here, it would probably be better to
     avoid generating a relaxation frag in the first place when it can be
     determined up front that a short instruction will suffice.  */

  /* Only relaxation frags should ever reach this hook.  */
  gas_assert (fragp->fr_type == rs_machine_dependent);
  /* Worst case: the instruction grows to the full 32-bit encoding.  */
  return INSN_SIZE;
}
26732
/* Estimate the size of a frag before relaxing.  Assume everything fits in
   2 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp,
			       segT segtype ATTRIBUTE_UNUSED)
{
  /* Start with the narrow (16-bit) encoding; the relax_* routines below
     grow fr_var to 4 when the wide encoding turns out to be needed.  */
  fragp->fr_var = 2;
  return 2;
}
26743
/* Convert a machine dependent frag.  Rewrites the provisional 16-bit
   Thumb encoding in the frag with its final 16- or 32-bit form (as
   decided by relaxation, recorded in fr_var) and attaches a fix-up so
   the immediate/offset field is filled in later.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* Start of the variable part of the frag: holds the 16-bit encoding
     emitted when the instruction was first assembled.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  old_op = bfd_get_16(abfd, buf);
  /* Build the expression the new fix-up will resolve: symbol+offset,
     or a bare constant when there is no symbol.  */
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
      /* Loads and stores with an immediate offset.  */
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Copy the register field(s) from the narrow encoding into
	     the positions the wide encoding uses; the placement differs
	     between the SP/PC-relative forms (top nibble 4 or 9) and
	     the register-offset forms.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the PC-relative literal load is PC-relative for fix-up
	 purposes.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Move the destination register into the wide encoding.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Narrow ADR is relative to the aligned PC; compensate here.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
      /* Data-processing with an immediate operand.  */
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* MOV/MOVS place the register at bit 8 of the wide encoding;
	     CMP/CMN place it at bit 16 (shift of 8 extra).  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      /* Unconditional branch: 25-bit reach when wide, 12-bit narrow.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      /* Conditional branch: carry the condition field across.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
      /* SP/PC-relative additions and SP adjustment.  */
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

      /* Three-register-form add/sub with immediate.  */
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 distinguishes the flag-setting form, which takes a
	     different immediate relocation.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      /* Relaxation frags are only created for the opcodes above.  */
      abort ();
    }
  /* Attach the fix-up covering the (possibly grown) instruction,
     preserving the original source location for diagnostics.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
26917
26918 /* Return the size of a relaxable immediate operand instruction.
26919 SHIFT and SIZE specify the form of the allowable immediate. */
26920 static int
26921 relax_immediate (fragS *fragp, int size, int shift)
26922 {
26923 offsetT offset;
26924 offsetT mask;
26925 offsetT low;
26926
26927 /* ??? Should be able to do better than this. */
26928 if (fragp->fr_symbol)
26929 return 4;
26930
26931 low = (1 << shift) - 1;
26932 mask = (1 << (shift + size)) - (1 << shift);
26933 offset = fragp->fr_offset;
26934 /* Force misaligned offsets to 32-bit variant. */
26935 if (offset & low)
26936 return 4;
26937 if (offset & ~mask)
26938 return 4;
26939 return 2;
26940 }
26941
/* Get the address of a symbol during relaxation.  Returns the symbol's
   value plus the frag's offset, compensated by STRETCH when the
   symbol's frag has not yet been processed on this relaxation pass.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  /* Absolute symbols must live in the zero-address frag.  */
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the accumulated stretch toward zero to the
		 alignment boundary; an intervening alignment frag
		 absorbs that much of the movement.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Fully absorbed: no adjustment remains.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* Only adjust if the symbol's frag lies ahead of us (the walk
	 found it); otherwise it has already been placed.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
26991
26992 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
26993 load. */
26994 static int
26995 relax_adr (fragS *fragp, asection *sec, long stretch)
26996 {
26997 addressT addr;
26998 offsetT val;
26999
27000 /* Assume worst case for symbols not known to be in the same section. */
27001 if (fragp->fr_symbol == NULL
27002 || !S_IS_DEFINED (fragp->fr_symbol)
27003 || sec != S_GET_SEGMENT (fragp->fr_symbol)
27004 || S_IS_WEAK (fragp->fr_symbol))
27005 return 4;
27006
27007 val = relaxed_symbol_addr (fragp, stretch);
27008 addr = fragp->fr_address + fragp->fr_fix;
27009 addr = (addr + 4) & ~3;
27010 /* Force misaligned targets to 32-bit variant. */
27011 if (val & 3)
27012 return 4;
27013 val -= addr;
27014 if (val < 0 || val > 1020)
27015 return 4;
27016 return 2;
27017 }
27018
27019 /* Return the size of a relaxable add/sub immediate instruction. */
27020 static int
27021 relax_addsub (fragS *fragp, asection *sec)
27022 {
27023 char *buf;
27024 int op;
27025
27026 buf = fragp->fr_literal + fragp->fr_fix;
27027 op = bfd_get_16(sec->owner, buf);
27028 if ((op & 0xf) == ((op >> 4) & 0xf))
27029 return relax_immediate (fragp, 8, 0);
27030 else
27031 return relax_immediate (fragp, 3, 0);
27032 }
27033
27034 /* Return TRUE iff the definition of symbol S could be pre-empted
27035 (overridden) at link or load time. */
27036 static bfd_boolean
27037 symbol_preemptible (symbolS *s)
27038 {
27039 /* Weak symbols can always be pre-empted. */
27040 if (S_IS_WEAK (s))
27041 return TRUE;
27042
27043 /* Non-global symbols cannot be pre-empted. */
27044 if (! S_IS_EXTERNAL (s))
27045 return FALSE;
27046
27047 #ifdef OBJ_ELF
27048 /* In ELF, a global symbol can be marked protected, or private. In that
27049 case it can't be pre-empted (other definitions in the same link unit
27050 would violate the ODR). */
27051 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
27052 return FALSE;
27053 #endif
27054
27055 /* Other global symbols might be pre-empted. */
27056 return TRUE;
27057 }
27058
/* Return the size of a relaxable branch instruction.  BITS is the
   size of the offset field in the narrow instruction.  Returns 2 for
   the narrow encoding, 4 for the wide one.  */

static int
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
{
  addressT addr;
  offsetT val;
  offsetT limit;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

#ifdef OBJ_ELF
  /* A branch to a function in ARM state will require interworking.  */
  if (S_IS_DEFINED (fragp->fr_symbol)
      && ARM_IS_FUNC (fragp->fr_symbol))
    return 4;
#endif

  /* A pre-emptible symbol may move at link/load time, so the linker must
     see a relocation - use the wide form.  */
  if (symbol_preemptible (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  /* The branch base is the instruction address plus 4 (Thumb pipeline
     offset).  */
  addr = fragp->fr_address + fragp->fr_fix + 4;
  val -= addr;

  /* Offset is a signed value *2 */
  limit = 1 << bits;
  if (val >= limit || val < -limit)
    return 4;
  return 2;
}
27095
27096
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* fr_subtype holds the Thumb mnemonic being relaxed; each helper
     returns the new instruction size (2 or 4 bytes).  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
27175
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED,
		  valueT size)
{
  /* No target-specific rounding is required; return SIZE unchanged.  */
  return size;
}
27184
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment: zero-pad up to instruction alignment,
   then fill with NOP encodings matching the frag's recorded ARM/Thumb
   state and the selected architecture.  */

void
arm_handle_align (fragS * fragP)
{
  /* NOP encodings indexed by [architecture variant][endianness].  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* MAX_MEM_FOR_RS_ALIGN_CODE is of the form 2^n - 1, so &= keeps only
     the residue below the maximum supported alignment.  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  if (bytes & (noop_size - 1))
    {
      /* Zero-fill any leading bytes that are not NOP-size aligned, and
	 mark them as data (ELF mapping symbol).  */
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
27304
27305 /* Called from md_do_align. Used to create an alignment
27306 frag in a code section. */
27307
27308 void
27309 arm_frag_align_code (int n, int max)
27310 {
27311 char * p;
27312
27313 /* We assume that there will never be a requirement
27314 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
27315 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
27316 {
27317 char err_msg[128];
27318
27319 sprintf (err_msg,
27320 _("alignments greater than %d bytes not supported in .text sections."),
27321 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
27322 as_fatal ("%s", err_msg);
27323 }
27324
27325 p = frag_var (rs_align_code,
27326 MAX_MEM_FOR_RS_ALIGN_CODE,
27327 1,
27328 (relax_substateT) max,
27329 (symbolS *) NULL,
27330 (offsetT) n,
27331 (char *) NULL);
27332 *p = 0;
27333 }
27334
27335 /* Perform target specific initialisation of a frag.
27336 Note - despite the name this initialisation is not done when the frag
27337 is created, but only when its type is assigned. A frag can be created
27338 and used a long time before its type is set, so beware of assuming that
27339 this initialisation is performed first. */
27340
27341 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  */
  /* MODE_RECORDED marks the field as valid; non-ELF targets keep no
     mapping-symbol state, so this is all the per-frag data needed.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
27348
27349 #else /* OBJ_ELF is defined. */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  bfd_boolean frag_thumb_mode;

  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* XOR strips the MODE_RECORDED marker, leaving the raw thumb flag.  */
  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;

  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  switch (fragP->fr_type)
    {
    case rs_align:
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
      break;
    default:
      break;
    }
}
27383
/* When we change sections we need to issue a new mapping symbol.  */
/* NOTE(review): the mapping symbol itself is emitted by the mapping-state
   machinery elsewhere; this hook only fixes up SHT_ARM_EXIDX linkage.  */

void
arm_elf_change_section (void)
{
  /* Link an unlinked unwind index table section to the .text section.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;
}
27394
27395 int
27396 arm_elf_section_type (const char * str, size_t len)
27397 {
27398 if (len == 5 && strncmp (str, "exidx", 5) == 0)
27399 return SHT_ARM_EXIDX;
27400
27401 return -1;
27402 }
27403 \f
27404 /* Code to deal with unwinding tables. */
27405
27406 static void add_unwind_adjustsp (offsetT);
27407
27408 /* Generate any deferred unwind frame offset. */
27409
27410 static void
27411 flush_pending_unwind (void)
27412 {
27413 offsetT offset;
27414
27415 offset = unwind.pending_offset;
27416 unwind.pending_offset = 0;
27417 if (offset != 0)
27418 add_unwind_adjustsp (offset);
27419 }
27420
/* Add an opcode to this list for this function.  Two-byte opcodes should
   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
   order.  */

static void
add_unwind_opcode (valueT op, int length)
{
  /* Add any deferred stack adjustment.  */
  if (unwind.pending_offset)
    flush_pending_unwind ();

  unwind.sp_restored = 0;

  /* Grow the opcode buffer in ARM_OPCODE_CHUNK_SIZE steps as needed.  */
  if (unwind.opcode_count + length > unwind.opcode_alloc)
    {
      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
      if (unwind.opcodes)
	unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
				     unwind.opcode_alloc);
      else
	unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
    }
  /* Append the opcode bytes least-significant first, preserving the
     reverse build order of the list.  */
  while (length > 0)
    {
      length--;
      unwind.opcodes[unwind.opcode_count] = op & 0xff;
      op >>= 8;
      unwind.opcode_count++;
    }
}
27451
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes.
   Positive offsets use the short "vsp += ..." forms where possible,
   falling back to the 0xb2 + uleb128 long form; negative offsets use
   the "vsp -= ..." encodings.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Decrease vsp in chunks of at most 0x100 bytes.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
27513
27514 /* Finish the list of unwind opcodes for this function. */
27515
27516 static void
27517 finish_unwind_opcodes (void)
27518 {
27519 valueT op;
27520
27521 if (unwind.fp_used)
27522 {
27523 /* Adjust sp as necessary. */
27524 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
27525 flush_pending_unwind ();
27526
27527 /* After restoring sp from the frame pointer. */
27528 op = 0x90 | unwind.fp_reg;
27529 add_unwind_opcode (op, 1);
27530 }
27531 else
27532 flush_pending_unwind ();
27533 }
27534
27535
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry.  Switches to (creating if necessary) the unwind section that
   corresponds to TEXT_SEG, propagating COMDAT group membership.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  struct elf_section_match match;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  text_name = segment_name (text_seg);
  /* The plain ".text" section maps to the bare prefix name.  */
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  memset (&match, 0, sizeof (match));

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      match.group_name = elf_group_name (text_seg);
      if (match.group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, flags, 0, &match,
			  linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
27603
27604
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round up to a whole number of 32-bit words, capped at the 8-bit
     word count the table format can express.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
27773
27774
/* Initialize the DWARF-2 unwind information for this procedure.  */
/* At function entry the CFA is the stack pointer with zero offset.  */

void
tc_arm_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
27782 #endif /* OBJ_ELF */
27783
27784 /* Convert REGNAME to a DWARF-2 register number. */
27785
27786 int
27787 tc_arm_regname_to_dw2regnum (char *regname)
27788 {
27789 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
27790 if (reg != FAIL)
27791 return reg;
27792
27793 /* PR 16694: Allow VFP registers as well. */
27794 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
27795 if (reg != FAIL)
27796 return 64 + reg;
27797
27798 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
27799 if (reg != FAIL)
27800 return reg + 256;
27801
27802 return FAIL;
27803 }
27804
27805 #ifdef TE_PE
27806 void
27807 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
27808 {
27809 expressionS exp;
27810
27811 exp.X_op = O_secrel;
27812 exp.X_add_symbol = symbol;
27813 exp.X_add_number = 0;
27814 emit_expr (&exp, size);
27815 }
27816 #endif
27817
27818 /* MD interface: Symbol and relocation handling. */
27819
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* A same-section BL to an ARM function on v5t+ will be turned
	 into a BLX later, so keep the full base for it.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
27953
27954 static bfd_boolean flag_warn_syms = TRUE;
27955
27956 bfd_boolean
27957 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
27958 {
27959 /* PR 18347 - Warn if the user attempts to create a symbol with the same
27960 name as an ARM instruction. Whilst strictly speaking it is allowed, it
27961 does mean that the resulting code might be very confusing to the reader.
27962 Also this warning can be triggered if the user omits an operand before
27963 an immediate address, eg:
27964
27965 LDR =foo
27966
27967 GAS treats this as an assignment of the value of the symbol foo to a
27968 symbol LDR, and so (without this code) it will not issue any kind of
27969 warning or error message.
27970
27971 Note - ARM instructions are case-insensitive but the strings in the hash
27972 table are all stored in lower case, so we must first ensure that name is
27973 lower case too. */
27974 if (flag_warn_syms && arm_ops_hsh)
27975 {
27976 char * nbuf = strdup (name);
27977 char * p;
27978
27979 for (p = nbuf; *p; p++)
27980 *p = TOLOWER (*p);
27981 if (hash_find (arm_ops_hsh, nbuf) != NULL)
27982 {
27983 static struct hash_control * already_warned = NULL;
27984
27985 if (already_warned == NULL)
27986 already_warned = hash_new ();
27987 /* Only warn about the symbol once. To keep the code
27988 simple we let hash_insert do the lookup for us. */
27989 if (hash_insert (already_warned, nbuf, NULL) == NULL)
27990 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
27991 }
27992 else
27993 free (nbuf);
27994 }
27995
27996 return FALSE;
27997 }
27998
27999 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
28000 Otherwise we have no need to default values of symbols. */
28001
28002 symbolS *
28003 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
28004 {
28005 #ifdef OBJ_ELF
28006 if (name[0] == '_' && name[1] == 'G'
28007 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
28008 {
28009 if (!GOT_symbol)
28010 {
28011 if (symbol_find (name))
28012 as_bad (_("GOT already in the symbol table"));
28013
28014 GOT_symbol = symbol_new (name, undefined_section,
28015 (valueT) 0, & zero_address_frag);
28016 }
28017
28018 return GOT_symbol;
28019 }
28020 #endif
28021
28022 return NULL;
28023 }
28024
/* Subroutine of md_apply_fix.  Check to see if an immediate can be
   computed as two separate immediate values, added together.  We
   already know that this value cannot be computed by just one ARM
   instruction.  On success, store the second encoded immediate in
   *HIGHPART and return the first; return FAIL otherwise.  */

static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* An ARM immediate is an 8-bit value rotated right by an even amount;
     try each rotation in turn.  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	/* The remaining byte of the value must fit in one of the three
	   bytes above the low one; encode it with the matching rotation
	   (the rotation count lives in bits [11:8], hence << 7 on the
	   even count).  */
	if (a & 0xff00)
	  {
	    if (a & ~ 0xffff)
	      continue;
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    gas_assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	return (a & 0xff) | (i << 7);
      }

  return FAIL;
}
28063
28064 static int
28065 validate_offset_imm (unsigned int val, int hwse)
28066 {
28067 if ((hwse && val > 255) || val > 4095)
28068 return FAIL;
28069 return val;
28070 }
28071
/* Subroutine of md_apply_fix.  Do those data_ops which can take a
   negative immediate constant by altering the instruction.  A bit of
   a hack really.
	MOV <-> MVN
	AND <-> BIC
	ADC <-> SBC
   by inverting the second operand, and
	ADD <-> SUB
	CMP <-> CMN
   by negating the second operand.
   On success, rewrite *INSTRUCTION in place and return the replacement
   encoded immediate; return FAIL if neither form is encodable.  */

static int
negate_data_op (unsigned long * instruction,
		unsigned long value)
{
  int op, new_inst;
  unsigned long negated, inverted;

  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* First negates.  */
    case OPCODE_SUB:		 /* ADD <-> SUB */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:		 /* CMP <-> CMN */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

      /* Now Inverted ops.  */
    case OPCODE_MOV:		 /* MOV <-> MVN */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:		 /* AND <-> BIC */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:		 /* ADC <-> SBC */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  /* encode_arm_immediate returned FAIL for the chosen transformation.  */
  if (value == (unsigned) FAIL)
    return FAIL;

  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}
28160
/* Like negate_data_op, but for Thumb-2.  Rewrites *INSTRUCTION to the
   complementary opcode and returns the re-encoded immediate, or FAIL.  */

static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* Rd == 15 means this is TST (no destination), which cannot be
	 converted.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned int)FAIL)
    return FAIL;

  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
28236
28237 /* Read a 32-bit thumb instruction from buf. */
28238
28239 static unsigned long
28240 get_thumb32_insn (char * buf)
28241 {
28242 unsigned long insn;
28243 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
28244 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
28245
28246 return insn;
28247 }
28248
28249 /* We usually want to set the low bit on the address of thumb function
28250 symbols. In particular .word foo - . should have the low bit set.
28251 Generic code tries to fold the difference of two symbols to
28252 a constant. Prevent this and force a relocation when the first symbols
28253 is a thumb function. */
28254
28255 bfd_boolean
28256 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
28257 {
28258 if (op == O_subtract
28259 && l->X_op == O_symbol
28260 && r->X_op == O_symbol
28261 && THUMB_IS_FUNC (l->X_add_symbol))
28262 {
28263 l->X_op = O_subtract;
28264 l->X_op_symbol = r->X_add_symbol;
28265 l->X_add_number -= r->X_add_number;
28266 return TRUE;
28267 }
28268
28269 /* Process as normal. */
28270 return FALSE;
28271 }
28272
28273 /* Encode Thumb2 unconditional branches and calls. The encoding
28274 for the 2 are identical for the immediate values. */
28275
28276 static void
28277 encode_thumb2_b_bl_offset (char * buf, offsetT value)
28278 {
28279 #define T2I1I2MASK ((1 << 13) | (1 << 11))
28280 offsetT newval;
28281 offsetT newval2;
28282 addressT S, I1, I2, lo, hi;
28283
28284 S = (value >> 24) & 0x01;
28285 I1 = (value >> 23) & 0x01;
28286 I2 = (value >> 22) & 0x01;
28287 hi = (value >> 12) & 0x3ff;
28288 lo = (value >> 1) & 0x7ff;
28289 newval = md_chars_to_number (buf, THUMB_SIZE);
28290 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
28291 newval |= (S << 10) | hi;
28292 newval2 &= ~T2I1I2MASK;
28293 newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
28294 md_number_to_chars (buf, newval, THUMB_SIZE);
28295 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
28296 }
28297
28298 void
28299 md_apply_fix (fixS * fixP,
28300 valueT * valP,
28301 segT seg)
28302 {
28303 offsetT value = * valP;
28304 offsetT newval;
28305 unsigned int newimm;
28306 unsigned long temp;
28307 int sign;
28308 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
28309
28310 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
28311
28312 /* Note whether this will delete the relocation. */
28313
28314 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
28315 fixP->fx_done = 1;
28316
28317 /* On a 64-bit host, silently truncate 'value' to 32 bits for
28318 consistency with the behaviour on 32-bit hosts. Remember value
28319 for emit_reloc. */
28320 value &= 0xffffffff;
28321 value ^= 0x80000000;
28322 value -= 0x80000000;
28323
28324 *valP = value;
28325 fixP->fx_addnumber = value;
28326
28327 /* Same treatment for fixP->fx_offset. */
28328 fixP->fx_offset &= 0xffffffff;
28329 fixP->fx_offset ^= 0x80000000;
28330 fixP->fx_offset -= 0x80000000;
28331
28332 switch (fixP->fx_r_type)
28333 {
28334 case BFD_RELOC_NONE:
28335 /* This will need to go in the object file. */
28336 fixP->fx_done = 0;
28337 break;
28338
28339 case BFD_RELOC_ARM_IMMEDIATE:
28340 /* We claim that this fixup has been processed here,
28341 even if in fact we generate an error because we do
28342 not have a reloc for it, so tc_gen_reloc will reject it. */
28343 fixP->fx_done = 1;
28344
28345 if (fixP->fx_addsy)
28346 {
28347 const char *msg = 0;
28348
28349 if (! S_IS_DEFINED (fixP->fx_addsy))
28350 msg = _("undefined symbol %s used as an immediate value");
28351 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
28352 msg = _("symbol %s is in a different section");
28353 else if (S_IS_WEAK (fixP->fx_addsy))
28354 msg = _("symbol %s is weak and may be overridden later");
28355
28356 if (msg)
28357 {
28358 as_bad_where (fixP->fx_file, fixP->fx_line,
28359 msg, S_GET_NAME (fixP->fx_addsy));
28360 break;
28361 }
28362 }
28363
28364 temp = md_chars_to_number (buf, INSN_SIZE);
28365
28366 /* If the offset is negative, we should use encoding A2 for ADR. */
28367 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
28368 newimm = negate_data_op (&temp, value);
28369 else
28370 {
28371 newimm = encode_arm_immediate (value);
28372
28373 /* If the instruction will fail, see if we can fix things up by
28374 changing the opcode. */
28375 if (newimm == (unsigned int) FAIL)
28376 newimm = negate_data_op (&temp, value);
28377 /* MOV accepts both ARM modified immediate (A1 encoding) and
28378 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
28379 When disassembling, MOV is preferred when there is no encoding
28380 overlap. */
28381 if (newimm == (unsigned int) FAIL
28382 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
28383 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
28384 && !((temp >> SBIT_SHIFT) & 0x1)
28385 && value >= 0 && value <= 0xffff)
28386 {
28387 /* Clear bits[23:20] to change encoding from A1 to A2. */
28388 temp &= 0xff0fffff;
28389 /* Encoding high 4bits imm. Code below will encode the remaining
28390 low 12bits. */
28391 temp |= (value & 0x0000f000) << 4;
28392 newimm = value & 0x00000fff;
28393 }
28394 }
28395
28396 if (newimm == (unsigned int) FAIL)
28397 {
28398 as_bad_where (fixP->fx_file, fixP->fx_line,
28399 _("invalid constant (%lx) after fixup"),
28400 (unsigned long) value);
28401 break;
28402 }
28403
28404 newimm |= (temp & 0xfffff000);
28405 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
28406 break;
28407
28408 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
28409 {
28410 unsigned int highpart = 0;
28411 unsigned int newinsn = 0xe1a00000; /* nop. */
28412
28413 if (fixP->fx_addsy)
28414 {
28415 const char *msg = 0;
28416
28417 if (! S_IS_DEFINED (fixP->fx_addsy))
28418 msg = _("undefined symbol %s used as an immediate value");
28419 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
28420 msg = _("symbol %s is in a different section");
28421 else if (S_IS_WEAK (fixP->fx_addsy))
28422 msg = _("symbol %s is weak and may be overridden later");
28423
28424 if (msg)
28425 {
28426 as_bad_where (fixP->fx_file, fixP->fx_line,
28427 msg, S_GET_NAME (fixP->fx_addsy));
28428 break;
28429 }
28430 }
28431
28432 newimm = encode_arm_immediate (value);
28433 temp = md_chars_to_number (buf, INSN_SIZE);
28434
28435 /* If the instruction will fail, see if we can fix things up by
28436 changing the opcode. */
28437 if (newimm == (unsigned int) FAIL
28438 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
28439 {
28440 /* No ? OK - try using two ADD instructions to generate
28441 the value. */
28442 newimm = validate_immediate_twopart (value, & highpart);
28443
28444 /* Yes - then make sure that the second instruction is
28445 also an add. */
28446 if (newimm != (unsigned int) FAIL)
28447 newinsn = temp;
28448 /* Still No ? Try using a negated value. */
28449 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
28450 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
28451 /* Otherwise - give up. */
28452 else
28453 {
28454 as_bad_where (fixP->fx_file, fixP->fx_line,
28455 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
28456 (long) value);
28457 break;
28458 }
28459
28460 /* Replace the first operand in the 2nd instruction (which
28461 is the PC) with the destination register. We have
28462 already added in the PC in the first instruction and we
28463 do not want to do it again. */
28464 newinsn &= ~ 0xf0000;
28465 newinsn |= ((newinsn & 0x0f000) << 4);
28466 }
28467
28468 newimm |= (temp & 0xfffff000);
28469 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
28470
28471 highpart |= (newinsn & 0xfffff000);
28472 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
28473 }
28474 break;
28475
28476 case BFD_RELOC_ARM_OFFSET_IMM:
28477 if (!fixP->fx_done && seg->use_rela_p)
28478 value = 0;
28479 /* Fall through. */
28480
28481 case BFD_RELOC_ARM_LITERAL:
28482 sign = value > 0;
28483
28484 if (value < 0)
28485 value = - value;
28486
28487 if (validate_offset_imm (value, 0) == FAIL)
28488 {
28489 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
28490 as_bad_where (fixP->fx_file, fixP->fx_line,
28491 _("invalid literal constant: pool needs to be closer"));
28492 else
28493 as_bad_where (fixP->fx_file, fixP->fx_line,
28494 _("bad immediate value for offset (%ld)"),
28495 (long) value);
28496 break;
28497 }
28498
28499 newval = md_chars_to_number (buf, INSN_SIZE);
28500 if (value == 0)
28501 newval &= 0xfffff000;
28502 else
28503 {
28504 newval &= 0xff7ff000;
28505 newval |= value | (sign ? INDEX_UP : 0);
28506 }
28507 md_number_to_chars (buf, newval, INSN_SIZE);
28508 break;
28509
28510 case BFD_RELOC_ARM_OFFSET_IMM8:
28511 case BFD_RELOC_ARM_HWLITERAL:
28512 sign = value > 0;
28513
28514 if (value < 0)
28515 value = - value;
28516
28517 if (validate_offset_imm (value, 1) == FAIL)
28518 {
28519 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
28520 as_bad_where (fixP->fx_file, fixP->fx_line,
28521 _("invalid literal constant: pool needs to be closer"));
28522 else
28523 as_bad_where (fixP->fx_file, fixP->fx_line,
28524 _("bad immediate value for 8-bit offset (%ld)"),
28525 (long) value);
28526 break;
28527 }
28528
28529 newval = md_chars_to_number (buf, INSN_SIZE);
28530 if (value == 0)
28531 newval &= 0xfffff0f0;
28532 else
28533 {
28534 newval &= 0xff7ff0f0;
28535 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
28536 }
28537 md_number_to_chars (buf, newval, INSN_SIZE);
28538 break;
28539
28540 case BFD_RELOC_ARM_T32_OFFSET_U8:
28541 if (value < 0 || value > 1020 || value % 4 != 0)
28542 as_bad_where (fixP->fx_file, fixP->fx_line,
28543 _("bad immediate value for offset (%ld)"), (long) value);
28544 value /= 4;
28545
28546 newval = md_chars_to_number (buf+2, THUMB_SIZE);
28547 newval |= value;
28548 md_number_to_chars (buf+2, newval, THUMB_SIZE);
28549 break;
28550
28551 case BFD_RELOC_ARM_T32_OFFSET_IMM:
28552 /* This is a complicated relocation used for all varieties of Thumb32
28553 load/store instruction with immediate offset:
28554
28555 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
28556 *4, optional writeback(W)
28557 (doubleword load/store)
28558
28559 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
28560 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
28561 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
28562 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
28563 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
28564
28565 Uppercase letters indicate bits that are already encoded at
28566 this point. Lowercase letters are our problem. For the
28567 second block of instructions, the secondary opcode nybble
28568 (bits 8..11) is present, and bit 23 is zero, even if this is
28569 a PC-relative operation. */
28570 newval = md_chars_to_number (buf, THUMB_SIZE);
28571 newval <<= 16;
28572 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
28573
28574 if ((newval & 0xf0000000) == 0xe0000000)
28575 {
28576 /* Doubleword load/store: 8-bit offset, scaled by 4. */
28577 if (value >= 0)
28578 newval |= (1 << 23);
28579 else
28580 value = -value;
28581 if (value % 4 != 0)
28582 {
28583 as_bad_where (fixP->fx_file, fixP->fx_line,
28584 _("offset not a multiple of 4"));
28585 break;
28586 }
28587 value /= 4;
28588 if (value > 0xff)
28589 {
28590 as_bad_where (fixP->fx_file, fixP->fx_line,
28591 _("offset out of range"));
28592 break;
28593 }
28594 newval &= ~0xff;
28595 }
28596 else if ((newval & 0x000f0000) == 0x000f0000)
28597 {
28598 /* PC-relative, 12-bit offset. */
28599 if (value >= 0)
28600 newval |= (1 << 23);
28601 else
28602 value = -value;
28603 if (value > 0xfff)
28604 {
28605 as_bad_where (fixP->fx_file, fixP->fx_line,
28606 _("offset out of range"));
28607 break;
28608 }
28609 newval &= ~0xfff;
28610 }
28611 else if ((newval & 0x00000100) == 0x00000100)
28612 {
28613 /* Writeback: 8-bit, +/- offset. */
28614 if (value >= 0)
28615 newval |= (1 << 9);
28616 else
28617 value = -value;
28618 if (value > 0xff)
28619 {
28620 as_bad_where (fixP->fx_file, fixP->fx_line,
28621 _("offset out of range"));
28622 break;
28623 }
28624 newval &= ~0xff;
28625 }
28626 else if ((newval & 0x00000f00) == 0x00000e00)
28627 {
28628 /* T-instruction: positive 8-bit offset. */
28629 if (value < 0 || value > 0xff)
28630 {
28631 as_bad_where (fixP->fx_file, fixP->fx_line,
28632 _("offset out of range"));
28633 break;
28634 }
28635 newval &= ~0xff;
28636 newval |= value;
28637 }
28638 else
28639 {
28640 /* Positive 12-bit or negative 8-bit offset. */
28641 int limit;
28642 if (value >= 0)
28643 {
28644 newval |= (1 << 23);
28645 limit = 0xfff;
28646 }
28647 else
28648 {
28649 value = -value;
28650 limit = 0xff;
28651 }
28652 if (value > limit)
28653 {
28654 as_bad_where (fixP->fx_file, fixP->fx_line,
28655 _("offset out of range"));
28656 break;
28657 }
28658 newval &= ~limit;
28659 }
28660
28661 newval |= value;
28662 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
28663 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
28664 break;
28665
28666 case BFD_RELOC_ARM_SHIFT_IMM:
28667 newval = md_chars_to_number (buf, INSN_SIZE);
28668 if (((unsigned long) value) > 32
28669 || (value == 32
28670 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
28671 {
28672 as_bad_where (fixP->fx_file, fixP->fx_line,
28673 _("shift expression is too large"));
28674 break;
28675 }
28676
28677 if (value == 0)
28678 /* Shifts of zero must be done as lsl. */
28679 newval &= ~0x60;
28680 else if (value == 32)
28681 value = 0;
28682 newval &= 0xfffff07f;
28683 newval |= (value & 0x1f) << 7;
28684 md_number_to_chars (buf, newval, INSN_SIZE);
28685 break;
28686
28687 case BFD_RELOC_ARM_T32_IMMEDIATE:
28688 case BFD_RELOC_ARM_T32_ADD_IMM:
28689 case BFD_RELOC_ARM_T32_IMM12:
28690 case BFD_RELOC_ARM_T32_ADD_PC12:
28691 /* We claim that this fixup has been processed here,
28692 even if in fact we generate an error because we do
28693 not have a reloc for it, so tc_gen_reloc will reject it. */
28694 fixP->fx_done = 1;
28695
28696 if (fixP->fx_addsy
28697 && ! S_IS_DEFINED (fixP->fx_addsy))
28698 {
28699 as_bad_where (fixP->fx_file, fixP->fx_line,
28700 _("undefined symbol %s used as an immediate value"),
28701 S_GET_NAME (fixP->fx_addsy));
28702 break;
28703 }
28704
28705 newval = md_chars_to_number (buf, THUMB_SIZE);
28706 newval <<= 16;
28707 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
28708
28709 newimm = FAIL;
28710 if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
28711 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
28712 Thumb2 modified immediate encoding (T2). */
28713 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
28714 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
28715 {
28716 newimm = encode_thumb32_immediate (value);
28717 if (newimm == (unsigned int) FAIL)
28718 newimm = thumb32_negate_data_op (&newval, value);
28719 }
28720 if (newimm == (unsigned int) FAIL)
28721 {
28722 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
28723 {
28724 /* Turn add/sum into addw/subw. */
28725 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
28726 newval = (newval & 0xfeffffff) | 0x02000000;
28727 /* No flat 12-bit imm encoding for addsw/subsw. */
28728 if ((newval & 0x00100000) == 0)
28729 {
28730 /* 12 bit immediate for addw/subw. */
28731 if (value < 0)
28732 {
28733 value = -value;
28734 newval ^= 0x00a00000;
28735 }
28736 if (value > 0xfff)
28737 newimm = (unsigned int) FAIL;
28738 else
28739 newimm = value;
28740 }
28741 }
28742 else
28743 {
28744 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
28745 UINT16 (T3 encoding), MOVW only accepts UINT16. When
28746 disassembling, MOV is preferred when there is no encoding
28747 overlap. */
28748 if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
28749 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
28750 but with the Rn field [19:16] set to 1111. */
28751 && (((newval >> 16) & 0xf) == 0xf)
28752 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
28753 && !((newval >> T2_SBIT_SHIFT) & 0x1)
28754 && value >= 0 && value <= 0xffff)
28755 {
28756 /* Toggle bit[25] to change encoding from T2 to T3. */
28757 newval ^= 1 << 25;
28758 /* Clear bits[19:16]. */
28759 newval &= 0xfff0ffff;
28760 /* Encoding high 4bits imm. Code below will encode the
28761 remaining low 12bits. */
28762 newval |= (value & 0x0000f000) << 4;
28763 newimm = value & 0x00000fff;
28764 }
28765 }
28766 }
28767
28768 if (newimm == (unsigned int)FAIL)
28769 {
28770 as_bad_where (fixP->fx_file, fixP->fx_line,
28771 _("invalid constant (%lx) after fixup"),
28772 (unsigned long) value);
28773 break;
28774 }
28775
28776 newval |= (newimm & 0x800) << 15;
28777 newval |= (newimm & 0x700) << 4;
28778 newval |= (newimm & 0x0ff);
28779
28780 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
28781 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
28782 break;
28783
28784 case BFD_RELOC_ARM_SMC:
28785 if (((unsigned long) value) > 0xf)
28786 as_bad_where (fixP->fx_file, fixP->fx_line,
28787 _("invalid smc expression"));
28788
28789 newval = md_chars_to_number (buf, INSN_SIZE);
28790 newval |= (value & 0xf);
28791 md_number_to_chars (buf, newval, INSN_SIZE);
28792 break;
28793
28794 case BFD_RELOC_ARM_HVC:
28795 if (((unsigned long) value) > 0xffff)
28796 as_bad_where (fixP->fx_file, fixP->fx_line,
28797 _("invalid hvc expression"));
28798 newval = md_chars_to_number (buf, INSN_SIZE);
28799 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
28800 md_number_to_chars (buf, newval, INSN_SIZE);
28801 break;
28802
28803 case BFD_RELOC_ARM_SWI:
28804 if (fixP->tc_fix_data != 0)
28805 {
28806 if (((unsigned long) value) > 0xff)
28807 as_bad_where (fixP->fx_file, fixP->fx_line,
28808 _("invalid swi expression"));
28809 newval = md_chars_to_number (buf, THUMB_SIZE);
28810 newval |= value;
28811 md_number_to_chars (buf, newval, THUMB_SIZE);
28812 }
28813 else
28814 {
28815 if (((unsigned long) value) > 0x00ffffff)
28816 as_bad_where (fixP->fx_file, fixP->fx_line,
28817 _("invalid swi expression"));
28818 newval = md_chars_to_number (buf, INSN_SIZE);
28819 newval |= value;
28820 md_number_to_chars (buf, newval, INSN_SIZE);
28821 }
28822 break;
28823
28824 case BFD_RELOC_ARM_MULTI:
28825 if (((unsigned long) value) > 0xffff)
28826 as_bad_where (fixP->fx_file, fixP->fx_line,
28827 _("invalid expression in load/store multiple"));
28828 newval = value | md_chars_to_number (buf, INSN_SIZE);
28829 md_number_to_chars (buf, newval, INSN_SIZE);
28830 break;
28831
28832 #ifdef OBJ_ELF
28833 case BFD_RELOC_ARM_PCREL_CALL:
28834
28835 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28836 && fixP->fx_addsy
28837 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28838 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28839 && THUMB_IS_FUNC (fixP->fx_addsy))
28840 /* Flip the bl to blx. This is a simple flip
28841 bit here because we generate PCREL_CALL for
28842 unconditional bls. */
28843 {
28844 newval = md_chars_to_number (buf, INSN_SIZE);
28845 newval = newval | 0x10000000;
28846 md_number_to_chars (buf, newval, INSN_SIZE);
28847 temp = 1;
28848 fixP->fx_done = 1;
28849 }
28850 else
28851 temp = 3;
28852 goto arm_branch_common;
28853
28854 case BFD_RELOC_ARM_PCREL_JUMP:
28855 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28856 && fixP->fx_addsy
28857 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28858 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28859 && THUMB_IS_FUNC (fixP->fx_addsy))
28860 {
28861 /* This would map to a bl<cond>, b<cond>,
28862 b<always> to a Thumb function. We
28863 need to force a relocation for this particular
28864 case. */
28865 newval = md_chars_to_number (buf, INSN_SIZE);
28866 fixP->fx_done = 0;
28867 }
28868 /* Fall through. */
28869
28870 case BFD_RELOC_ARM_PLT32:
28871 #endif
28872 case BFD_RELOC_ARM_PCREL_BRANCH:
28873 temp = 3;
28874 goto arm_branch_common;
28875
28876 case BFD_RELOC_ARM_PCREL_BLX:
28877
28878 temp = 1;
28879 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28880 && fixP->fx_addsy
28881 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28882 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28883 && ARM_IS_FUNC (fixP->fx_addsy))
28884 {
28885 /* Flip the blx to a bl and warn. */
28886 const char *name = S_GET_NAME (fixP->fx_addsy);
28887 newval = 0xeb000000;
28888 as_warn_where (fixP->fx_file, fixP->fx_line,
28889 _("blx to '%s' an ARM ISA state function changed to bl"),
28890 name);
28891 md_number_to_chars (buf, newval, INSN_SIZE);
28892 temp = 3;
28893 fixP->fx_done = 1;
28894 }
28895
28896 #ifdef OBJ_ELF
28897 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
28898 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
28899 #endif
28900
28901 arm_branch_common:
28902 /* We are going to store value (shifted right by two) in the
28903 instruction, in a 24 bit, signed field. Bits 26 through 32 either
28904 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
28905 also be clear. */
28906 if (value & temp)
28907 as_bad_where (fixP->fx_file, fixP->fx_line,
28908 _("misaligned branch destination"));
28909 if ((value & (offsetT)0xfe000000) != (offsetT)0
28910 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
28911 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28912
28913 if (fixP->fx_done || !seg->use_rela_p)
28914 {
28915 newval = md_chars_to_number (buf, INSN_SIZE);
28916 newval |= (value >> 2) & 0x00ffffff;
28917 /* Set the H bit on BLX instructions. */
28918 if (temp == 1)
28919 {
28920 if (value & 2)
28921 newval |= 0x01000000;
28922 else
28923 newval &= ~0x01000000;
28924 }
28925 md_number_to_chars (buf, newval, INSN_SIZE);
28926 }
28927 break;
28928
28929 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
28930 /* CBZ can only branch forward. */
28931
28932 /* Attempts to use CBZ to branch to the next instruction
28933 (which, strictly speaking, are prohibited) will be turned into
28934 no-ops.
28935
28936 FIXME: It may be better to remove the instruction completely and
28937 perform relaxation. */
28938 if (value == -2)
28939 {
28940 newval = md_chars_to_number (buf, THUMB_SIZE);
28941 newval = 0xbf00; /* NOP encoding T1 */
28942 md_number_to_chars (buf, newval, THUMB_SIZE);
28943 }
28944 else
28945 {
28946 if (value & ~0x7e)
28947 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28948
28949 if (fixP->fx_done || !seg->use_rela_p)
28950 {
28951 newval = md_chars_to_number (buf, THUMB_SIZE);
28952 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
28953 md_number_to_chars (buf, newval, THUMB_SIZE);
28954 }
28955 }
28956 break;
28957
28958 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
28959 if (out_of_range_p (value, 8))
28960 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28961
28962 if (fixP->fx_done || !seg->use_rela_p)
28963 {
28964 newval = md_chars_to_number (buf, THUMB_SIZE);
28965 newval |= (value & 0x1ff) >> 1;
28966 md_number_to_chars (buf, newval, THUMB_SIZE);
28967 }
28968 break;
28969
28970 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
28971 if (out_of_range_p (value, 11))
28972 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28973
28974 if (fixP->fx_done || !seg->use_rela_p)
28975 {
28976 newval = md_chars_to_number (buf, THUMB_SIZE);
28977 newval |= (value & 0xfff) >> 1;
28978 md_number_to_chars (buf, newval, THUMB_SIZE);
28979 }
28980 break;
28981
28982 /* This relocation is misnamed, it should be BRANCH21. */
28983 case BFD_RELOC_THUMB_PCREL_BRANCH20:
28984 if (fixP->fx_addsy
28985 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28986 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28987 && ARM_IS_FUNC (fixP->fx_addsy)
28988 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
28989 {
28990 /* Force a relocation for a branch 20 bits wide. */
28991 fixP->fx_done = 0;
28992 }
28993 if (out_of_range_p (value, 20))
28994 as_bad_where (fixP->fx_file, fixP->fx_line,
28995 _("conditional branch out of range"));
28996
28997 if (fixP->fx_done || !seg->use_rela_p)
28998 {
28999 offsetT newval2;
29000 addressT S, J1, J2, lo, hi;
29001
29002 S = (value & 0x00100000) >> 20;
29003 J2 = (value & 0x00080000) >> 19;
29004 J1 = (value & 0x00040000) >> 18;
29005 hi = (value & 0x0003f000) >> 12;
29006 lo = (value & 0x00000ffe) >> 1;
29007
29008 newval = md_chars_to_number (buf, THUMB_SIZE);
29009 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29010 newval |= (S << 10) | hi;
29011 newval2 |= (J1 << 13) | (J2 << 11) | lo;
29012 md_number_to_chars (buf, newval, THUMB_SIZE);
29013 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29014 }
29015 break;
29016
29017 case BFD_RELOC_THUMB_PCREL_BLX:
29018 /* If there is a blx from a thumb state function to
29019 another thumb function flip this to a bl and warn
29020 about it. */
29021
29022 if (fixP->fx_addsy
29023 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29024 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29025 && THUMB_IS_FUNC (fixP->fx_addsy))
29026 {
29027 const char *name = S_GET_NAME (fixP->fx_addsy);
29028 as_warn_where (fixP->fx_file, fixP->fx_line,
29029 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
29030 name);
29031 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29032 newval = newval | 0x1000;
29033 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
29034 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
29035 fixP->fx_done = 1;
29036 }
29037
29038
29039 goto thumb_bl_common;
29040
29041 case BFD_RELOC_THUMB_PCREL_BRANCH23:
29042 /* A bl from Thumb state ISA to an internal ARM state function
29043 is converted to a blx. */
29044 if (fixP->fx_addsy
29045 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29046 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29047 && ARM_IS_FUNC (fixP->fx_addsy)
29048 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
29049 {
29050 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29051 newval = newval & ~0x1000;
29052 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
29053 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
29054 fixP->fx_done = 1;
29055 }
29056
29057 thumb_bl_common:
29058
29059 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
29060 /* For a BLX instruction, make sure that the relocation is rounded up
29061 to a word boundary. This follows the semantics of the instruction
29062 which specifies that bit 1 of the target address will come from bit
29063 1 of the base address. */
29064 value = (value + 3) & ~ 3;
29065
29066 #ifdef OBJ_ELF
29067 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
29068 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
29069 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
29070 #endif
29071
29072 if (out_of_range_p (value, 22))
29073 {
29074 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
29075 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
29076 else if (out_of_range_p (value, 24))
29077 as_bad_where (fixP->fx_file, fixP->fx_line,
29078 _("Thumb2 branch out of range"));
29079 }
29080
29081 if (fixP->fx_done || !seg->use_rela_p)
29082 encode_thumb2_b_bl_offset (buf, value);
29083
29084 break;
29085
29086 case BFD_RELOC_THUMB_PCREL_BRANCH25:
29087 if (out_of_range_p (value, 24))
29088 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
29089
29090 if (fixP->fx_done || !seg->use_rela_p)
29091 encode_thumb2_b_bl_offset (buf, value);
29092
29093 break;
29094
29095 case BFD_RELOC_8:
29096 if (fixP->fx_done || !seg->use_rela_p)
29097 *buf = value;
29098 break;
29099
29100 case BFD_RELOC_16:
29101 if (fixP->fx_done || !seg->use_rela_p)
29102 md_number_to_chars (buf, value, 2);
29103 break;
29104
29105 #ifdef OBJ_ELF
29106 case BFD_RELOC_ARM_TLS_CALL:
29107 case BFD_RELOC_ARM_THM_TLS_CALL:
29108 case BFD_RELOC_ARM_TLS_DESCSEQ:
29109 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
29110 case BFD_RELOC_ARM_TLS_GOTDESC:
29111 case BFD_RELOC_ARM_TLS_GD32:
29112 case BFD_RELOC_ARM_TLS_LE32:
29113 case BFD_RELOC_ARM_TLS_IE32:
29114 case BFD_RELOC_ARM_TLS_LDM32:
29115 case BFD_RELOC_ARM_TLS_LDO32:
29116 S_SET_THREAD_LOCAL (fixP->fx_addsy);
29117 break;
29118
29119 /* Same handling as above, but with the arm_fdpic guard. */
29120 case BFD_RELOC_ARM_TLS_GD32_FDPIC:
29121 case BFD_RELOC_ARM_TLS_IE32_FDPIC:
29122 case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
29123 if (arm_fdpic)
29124 {
29125 S_SET_THREAD_LOCAL (fixP->fx_addsy);
29126 }
29127 else
29128 {
29129 as_bad_where (fixP->fx_file, fixP->fx_line,
29130 _("Relocation supported only in FDPIC mode"));
29131 }
29132 break;
29133
29134 case BFD_RELOC_ARM_GOT32:
29135 case BFD_RELOC_ARM_GOTOFF:
29136 break;
29137
29138 case BFD_RELOC_ARM_GOT_PREL:
29139 if (fixP->fx_done || !seg->use_rela_p)
29140 md_number_to_chars (buf, value, 4);
29141 break;
29142
29143 case BFD_RELOC_ARM_TARGET2:
29144 /* TARGET2 is not partial-inplace, so we need to write the
29145 addend here for REL targets, because it won't be written out
29146 during reloc processing later. */
29147 if (fixP->fx_done || !seg->use_rela_p)
29148 md_number_to_chars (buf, fixP->fx_offset, 4);
29149 break;
29150
29151 /* Relocations for FDPIC. */
29152 case BFD_RELOC_ARM_GOTFUNCDESC:
29153 case BFD_RELOC_ARM_GOTOFFFUNCDESC:
29154 case BFD_RELOC_ARM_FUNCDESC:
29155 if (arm_fdpic)
29156 {
29157 if (fixP->fx_done || !seg->use_rela_p)
29158 md_number_to_chars (buf, 0, 4);
29159 }
29160 else
29161 {
29162 as_bad_where (fixP->fx_file, fixP->fx_line,
29163 _("Relocation supported only in FDPIC mode"));
29164 }
29165 break;
29166 #endif
29167
29168 case BFD_RELOC_RVA:
29169 case BFD_RELOC_32:
29170 case BFD_RELOC_ARM_TARGET1:
29171 case BFD_RELOC_ARM_ROSEGREL32:
29172 case BFD_RELOC_ARM_SBREL32:
29173 case BFD_RELOC_32_PCREL:
29174 #ifdef TE_PE
29175 case BFD_RELOC_32_SECREL:
29176 #endif
29177 if (fixP->fx_done || !seg->use_rela_p)
29178 #ifdef TE_WINCE
29179 /* For WinCE we only do this for pcrel fixups. */
29180 if (fixP->fx_done || fixP->fx_pcrel)
29181 #endif
29182 md_number_to_chars (buf, value, 4);
29183 break;
29184
29185 #ifdef OBJ_ELF
29186 case BFD_RELOC_ARM_PREL31:
29187 if (fixP->fx_done || !seg->use_rela_p)
29188 {
29189 newval = md_chars_to_number (buf, 4) & 0x80000000;
29190 if ((value ^ (value >> 1)) & 0x40000000)
29191 {
29192 as_bad_where (fixP->fx_file, fixP->fx_line,
29193 _("rel31 relocation overflow"));
29194 }
29195 newval |= value & 0x7fffffff;
29196 md_number_to_chars (buf, newval, 4);
29197 }
29198 break;
29199 #endif
29200
29201 case BFD_RELOC_ARM_CP_OFF_IMM:
29202 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
29203 case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM:
29204 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
29205 newval = md_chars_to_number (buf, INSN_SIZE);
29206 else
29207 newval = get_thumb32_insn (buf);
29208 if ((newval & 0x0f200f00) == 0x0d000900)
29209 {
29210 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
29211 has permitted values that are multiples of 2, in the range 0
29212 to 510. */
29213 if (value < -510 || value > 510 || (value & 1))
29214 as_bad_where (fixP->fx_file, fixP->fx_line,
29215 _("co-processor offset out of range"));
29216 }
29217 else if ((newval & 0xfe001f80) == 0xec000f80)
29218 {
29219 if (value < -511 || value > 512 || (value & 3))
29220 as_bad_where (fixP->fx_file, fixP->fx_line,
29221 _("co-processor offset out of range"));
29222 }
29223 else if (value < -1023 || value > 1023 || (value & 3))
29224 as_bad_where (fixP->fx_file, fixP->fx_line,
29225 _("co-processor offset out of range"));
29226 cp_off_common:
29227 sign = value > 0;
29228 if (value < 0)
29229 value = -value;
29230 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
29231 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
29232 newval = md_chars_to_number (buf, INSN_SIZE);
29233 else
29234 newval = get_thumb32_insn (buf);
29235 if (value == 0)
29236 {
29237 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
29238 newval &= 0xffffff80;
29239 else
29240 newval &= 0xffffff00;
29241 }
29242 else
29243 {
29244 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
29245 newval &= 0xff7fff80;
29246 else
29247 newval &= 0xff7fff00;
29248 if ((newval & 0x0f200f00) == 0x0d000900)
29249 {
29250 /* This is a fp16 vstr/vldr.
29251
29252 It requires the immediate offset in the instruction is shifted
29253 left by 1 to be a half-word offset.
29254
29255 Here, left shift by 1 first, and later right shift by 2
29256 should get the right offset. */
29257 value <<= 1;
29258 }
29259 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
29260 }
29261 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
29262 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
29263 md_number_to_chars (buf, newval, INSN_SIZE);
29264 else
29265 put_thumb32_insn (buf, newval);
29266 break;
29267
29268 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
29269 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
29270 if (value < -255 || value > 255)
29271 as_bad_where (fixP->fx_file, fixP->fx_line,
29272 _("co-processor offset out of range"));
29273 value *= 4;
29274 goto cp_off_common;
29275
29276 case BFD_RELOC_ARM_THUMB_OFFSET:
29277 newval = md_chars_to_number (buf, THUMB_SIZE);
29278 /* Exactly what ranges, and where the offset is inserted depends
29279 on the type of instruction, we can establish this from the
29280 top 4 bits. */
29281 switch (newval >> 12)
29282 {
29283 case 4: /* PC load. */
29284 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
29285 forced to zero for these loads; md_pcrel_from has already
29286 compensated for this. */
29287 if (value & 3)
29288 as_bad_where (fixP->fx_file, fixP->fx_line,
29289 _("invalid offset, target not word aligned (0x%08lX)"),
29290 (((unsigned long) fixP->fx_frag->fr_address
29291 + (unsigned long) fixP->fx_where) & ~3)
29292 + (unsigned long) value);
29293 else if (get_recorded_alignment (seg) < 2)
29294 as_warn_where (fixP->fx_file, fixP->fx_line,
29295 _("section does not have enough alignment to ensure safe PC-relative loads"));
29296
29297 if (value & ~0x3fc)
29298 as_bad_where (fixP->fx_file, fixP->fx_line,
29299 _("invalid offset, value too big (0x%08lX)"),
29300 (long) value);
29301
29302 newval |= value >> 2;
29303 break;
29304
29305 case 9: /* SP load/store. */
29306 if (value & ~0x3fc)
29307 as_bad_where (fixP->fx_file, fixP->fx_line,
29308 _("invalid offset, value too big (0x%08lX)"),
29309 (long) value);
29310 newval |= value >> 2;
29311 break;
29312
29313 case 6: /* Word load/store. */
29314 if (value & ~0x7c)
29315 as_bad_where (fixP->fx_file, fixP->fx_line,
29316 _("invalid offset, value too big (0x%08lX)"),
29317 (long) value);
29318 newval |= value << 4; /* 6 - 2. */
29319 break;
29320
29321 case 7: /* Byte load/store. */
29322 if (value & ~0x1f)
29323 as_bad_where (fixP->fx_file, fixP->fx_line,
29324 _("invalid offset, value too big (0x%08lX)"),
29325 (long) value);
29326 newval |= value << 6;
29327 break;
29328
29329 case 8: /* Halfword load/store. */
29330 if (value & ~0x3e)
29331 as_bad_where (fixP->fx_file, fixP->fx_line,
29332 _("invalid offset, value too big (0x%08lX)"),
29333 (long) value);
29334 newval |= value << 5; /* 6 - 1. */
29335 break;
29336
29337 default:
29338 as_bad_where (fixP->fx_file, fixP->fx_line,
29339 "Unable to process relocation for thumb opcode: %lx",
29340 (unsigned long) newval);
29341 break;
29342 }
29343 md_number_to_chars (buf, newval, THUMB_SIZE);
29344 break;
29345
29346 case BFD_RELOC_ARM_THUMB_ADD:
29347 /* This is a complicated relocation, since we use it for all of
29348 the following immediate relocations:
29349
29350 3bit ADD/SUB
29351 8bit ADD/SUB
29352 9bit ADD/SUB SP word-aligned
29353 10bit ADD PC/SP word-aligned
29354
29355 The type of instruction being processed is encoded in the
29356 instruction field:
29357
29358 0x8000 SUB
29359 0x00F0 Rd
29360 0x000F Rs
29361 */
29362 newval = md_chars_to_number (buf, THUMB_SIZE);
29363 {
29364 int rd = (newval >> 4) & 0xf;
29365 int rs = newval & 0xf;
29366 int subtract = !!(newval & 0x8000);
29367
29368 /* Check for HI regs, only very restricted cases allowed:
29369 Adjusting SP, and using PC or SP to get an address. */
29370 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
29371 || (rs > 7 && rs != REG_SP && rs != REG_PC))
29372 as_bad_where (fixP->fx_file, fixP->fx_line,
29373 _("invalid Hi register with immediate"));
29374
29375 /* If value is negative, choose the opposite instruction. */
29376 if (value < 0)
29377 {
29378 value = -value;
29379 subtract = !subtract;
29380 if (value < 0)
29381 as_bad_where (fixP->fx_file, fixP->fx_line,
29382 _("immediate value out of range"));
29383 }
29384
29385 if (rd == REG_SP)
29386 {
29387 if (value & ~0x1fc)
29388 as_bad_where (fixP->fx_file, fixP->fx_line,
29389 _("invalid immediate for stack address calculation"));
29390 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
29391 newval |= value >> 2;
29392 }
29393 else if (rs == REG_PC || rs == REG_SP)
29394 {
29395 /* PR gas/18541. If the addition is for a defined symbol
29396 within range of an ADR instruction then accept it. */
29397 if (subtract
29398 && value == 4
29399 && fixP->fx_addsy != NULL)
29400 {
29401 subtract = 0;
29402
29403 if (! S_IS_DEFINED (fixP->fx_addsy)
29404 || S_GET_SEGMENT (fixP->fx_addsy) != seg
29405 || S_IS_WEAK (fixP->fx_addsy))
29406 {
29407 as_bad_where (fixP->fx_file, fixP->fx_line,
29408 _("address calculation needs a strongly defined nearby symbol"));
29409 }
29410 else
29411 {
29412 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
29413
29414 /* Round up to the next 4-byte boundary. */
29415 if (v & 3)
29416 v = (v + 3) & ~ 3;
29417 else
29418 v += 4;
29419 v = S_GET_VALUE (fixP->fx_addsy) - v;
29420
29421 if (v & ~0x3fc)
29422 {
29423 as_bad_where (fixP->fx_file, fixP->fx_line,
29424 _("symbol too far away"));
29425 }
29426 else
29427 {
29428 fixP->fx_done = 1;
29429 value = v;
29430 }
29431 }
29432 }
29433
29434 if (subtract || value & ~0x3fc)
29435 as_bad_where (fixP->fx_file, fixP->fx_line,
29436 _("invalid immediate for address calculation (value = 0x%08lX)"),
29437 (unsigned long) (subtract ? - value : value));
29438 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
29439 newval |= rd << 8;
29440 newval |= value >> 2;
29441 }
29442 else if (rs == rd)
29443 {
29444 if (value & ~0xff)
29445 as_bad_where (fixP->fx_file, fixP->fx_line,
29446 _("immediate value out of range"));
29447 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
29448 newval |= (rd << 8) | value;
29449 }
29450 else
29451 {
29452 if (value & ~0x7)
29453 as_bad_where (fixP->fx_file, fixP->fx_line,
29454 _("immediate value out of range"));
29455 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
29456 newval |= rd | (rs << 3) | (value << 6);
29457 }
29458 }
29459 md_number_to_chars (buf, newval, THUMB_SIZE);
29460 break;
29461
29462 case BFD_RELOC_ARM_THUMB_IMM:
29463 newval = md_chars_to_number (buf, THUMB_SIZE);
29464 if (value < 0 || value > 255)
29465 as_bad_where (fixP->fx_file, fixP->fx_line,
29466 _("invalid immediate: %ld is out of range"),
29467 (long) value);
29468 newval |= value;
29469 md_number_to_chars (buf, newval, THUMB_SIZE);
29470 break;
29471
29472 case BFD_RELOC_ARM_THUMB_SHIFT:
29473 /* 5bit shift value (0..32). LSL cannot take 32. */
29474 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
29475 temp = newval & 0xf800;
29476 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
29477 as_bad_where (fixP->fx_file, fixP->fx_line,
29478 _("invalid shift value: %ld"), (long) value);
29479 /* Shifts of zero must be encoded as LSL. */
29480 if (value == 0)
29481 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
29482 /* Shifts of 32 are encoded as zero. */
29483 else if (value == 32)
29484 value = 0;
29485 newval |= value << 6;
29486 md_number_to_chars (buf, newval, THUMB_SIZE);
29487 break;
29488
29489 case BFD_RELOC_VTABLE_INHERIT:
29490 case BFD_RELOC_VTABLE_ENTRY:
29491 fixP->fx_done = 0;
29492 return;
29493
29494 case BFD_RELOC_ARM_MOVW:
29495 case BFD_RELOC_ARM_MOVT:
29496 case BFD_RELOC_ARM_THUMB_MOVW:
29497 case BFD_RELOC_ARM_THUMB_MOVT:
29498 if (fixP->fx_done || !seg->use_rela_p)
29499 {
29500 /* REL format relocations are limited to a 16-bit addend. */
29501 if (!fixP->fx_done)
29502 {
29503 if (value < -0x8000 || value > 0x7fff)
29504 as_bad_where (fixP->fx_file, fixP->fx_line,
29505 _("offset out of range"));
29506 }
29507 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
29508 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
29509 {
29510 value >>= 16;
29511 }
29512
29513 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
29514 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
29515 {
29516 newval = get_thumb32_insn (buf);
29517 newval &= 0xfbf08f00;
29518 newval |= (value & 0xf000) << 4;
29519 newval |= (value & 0x0800) << 15;
29520 newval |= (value & 0x0700) << 4;
29521 newval |= (value & 0x00ff);
29522 put_thumb32_insn (buf, newval);
29523 }
29524 else
29525 {
29526 newval = md_chars_to_number (buf, 4);
29527 newval &= 0xfff0f000;
29528 newval |= value & 0x0fff;
29529 newval |= (value & 0xf000) << 4;
29530 md_number_to_chars (buf, newval, 4);
29531 }
29532 }
29533 return;
29534
29535 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
29536 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
29537 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
29538 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
29539 gas_assert (!fixP->fx_done);
29540 {
29541 bfd_vma insn;
29542 bfd_boolean is_mov;
29543 bfd_vma encoded_addend = value;
29544
29545 /* Check that addend can be encoded in instruction. */
29546 if (!seg->use_rela_p && (value < 0 || value > 255))
29547 as_bad_where (fixP->fx_file, fixP->fx_line,
29548 _("the offset 0x%08lX is not representable"),
29549 (unsigned long) encoded_addend);
29550
29551 /* Extract the instruction. */
29552 insn = md_chars_to_number (buf, THUMB_SIZE);
29553 is_mov = (insn & 0xf800) == 0x2000;
29554
29555 /* Encode insn. */
29556 if (is_mov)
29557 {
29558 if (!seg->use_rela_p)
29559 insn |= encoded_addend;
29560 }
29561 else
29562 {
29563 int rd, rs;
29564
29565 /* Extract the instruction. */
29566 /* Encoding is the following
29567 0x8000 SUB
29568 0x00F0 Rd
29569 0x000F Rs
29570 */
29571 /* The following conditions must be true :
29572 - ADD
29573 - Rd == Rs
29574 - Rd <= 7
29575 */
29576 rd = (insn >> 4) & 0xf;
29577 rs = insn & 0xf;
29578 if ((insn & 0x8000) || (rd != rs) || rd > 7)
29579 as_bad_where (fixP->fx_file, fixP->fx_line,
29580 _("Unable to process relocation for thumb opcode: %lx"),
29581 (unsigned long) insn);
29582
29583 /* Encode as ADD immediate8 thumb 1 code. */
29584 insn = 0x3000 | (rd << 8);
29585
29586 /* Place the encoded addend into the first 8 bits of the
29587 instruction. */
29588 if (!seg->use_rela_p)
29589 insn |= encoded_addend;
29590 }
29591
29592 /* Update the instruction. */
29593 md_number_to_chars (buf, insn, THUMB_SIZE);
29594 }
29595 break;
29596
29597 case BFD_RELOC_ARM_ALU_PC_G0_NC:
29598 case BFD_RELOC_ARM_ALU_PC_G0:
29599 case BFD_RELOC_ARM_ALU_PC_G1_NC:
29600 case BFD_RELOC_ARM_ALU_PC_G1:
29601 case BFD_RELOC_ARM_ALU_PC_G2:
29602 case BFD_RELOC_ARM_ALU_SB_G0_NC:
29603 case BFD_RELOC_ARM_ALU_SB_G0:
29604 case BFD_RELOC_ARM_ALU_SB_G1_NC:
29605 case BFD_RELOC_ARM_ALU_SB_G1:
29606 case BFD_RELOC_ARM_ALU_SB_G2:
29607 gas_assert (!fixP->fx_done);
29608 if (!seg->use_rela_p)
29609 {
29610 bfd_vma insn;
29611 bfd_vma encoded_addend;
29612 bfd_vma addend_abs = llabs (value);
29613
29614 /* Check that the absolute value of the addend can be
29615 expressed as an 8-bit constant plus a rotation. */
29616 encoded_addend = encode_arm_immediate (addend_abs);
29617 if (encoded_addend == (unsigned int) FAIL)
29618 as_bad_where (fixP->fx_file, fixP->fx_line,
29619 _("the offset 0x%08lX is not representable"),
29620 (unsigned long) addend_abs);
29621
29622 /* Extract the instruction. */
29623 insn = md_chars_to_number (buf, INSN_SIZE);
29624
29625 /* If the addend is positive, use an ADD instruction.
29626 Otherwise use a SUB. Take care not to destroy the S bit. */
29627 insn &= 0xff1fffff;
29628 if (value < 0)
29629 insn |= 1 << 22;
29630 else
29631 insn |= 1 << 23;
29632
29633 /* Place the encoded addend into the first 12 bits of the
29634 instruction. */
29635 insn &= 0xfffff000;
29636 insn |= encoded_addend;
29637
29638 /* Update the instruction. */
29639 md_number_to_chars (buf, insn, INSN_SIZE);
29640 }
29641 break;
29642
29643 case BFD_RELOC_ARM_LDR_PC_G0:
29644 case BFD_RELOC_ARM_LDR_PC_G1:
29645 case BFD_RELOC_ARM_LDR_PC_G2:
29646 case BFD_RELOC_ARM_LDR_SB_G0:
29647 case BFD_RELOC_ARM_LDR_SB_G1:
29648 case BFD_RELOC_ARM_LDR_SB_G2:
29649 gas_assert (!fixP->fx_done);
29650 if (!seg->use_rela_p)
29651 {
29652 bfd_vma insn;
29653 bfd_vma addend_abs = llabs (value);
29654
29655 /* Check that the absolute value of the addend can be
29656 encoded in 12 bits. */
29657 if (addend_abs >= 0x1000)
29658 as_bad_where (fixP->fx_file, fixP->fx_line,
29659 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
29660 (unsigned long) addend_abs);
29661
29662 /* Extract the instruction. */
29663 insn = md_chars_to_number (buf, INSN_SIZE);
29664
29665 /* If the addend is negative, clear bit 23 of the instruction.
29666 Otherwise set it. */
29667 if (value < 0)
29668 insn &= ~(1 << 23);
29669 else
29670 insn |= 1 << 23;
29671
29672 /* Place the absolute value of the addend into the first 12 bits
29673 of the instruction. */
29674 insn &= 0xfffff000;
29675 insn |= addend_abs;
29676
29677 /* Update the instruction. */
29678 md_number_to_chars (buf, insn, INSN_SIZE);
29679 }
29680 break;
29681
29682 case BFD_RELOC_ARM_LDRS_PC_G0:
29683 case BFD_RELOC_ARM_LDRS_PC_G1:
29684 case BFD_RELOC_ARM_LDRS_PC_G2:
29685 case BFD_RELOC_ARM_LDRS_SB_G0:
29686 case BFD_RELOC_ARM_LDRS_SB_G1:
29687 case BFD_RELOC_ARM_LDRS_SB_G2:
29688 gas_assert (!fixP->fx_done);
29689 if (!seg->use_rela_p)
29690 {
29691 bfd_vma insn;
29692 bfd_vma addend_abs = llabs (value);
29693
29694 /* Check that the absolute value of the addend can be
29695 encoded in 8 bits. */
29696 if (addend_abs >= 0x100)
29697 as_bad_where (fixP->fx_file, fixP->fx_line,
29698 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
29699 (unsigned long) addend_abs);
29700
29701 /* Extract the instruction. */
29702 insn = md_chars_to_number (buf, INSN_SIZE);
29703
29704 /* If the addend is negative, clear bit 23 of the instruction.
29705 Otherwise set it. */
29706 if (value < 0)
29707 insn &= ~(1 << 23);
29708 else
29709 insn |= 1 << 23;
29710
29711 /* Place the first four bits of the absolute value of the addend
29712 into the first 4 bits of the instruction, and the remaining
29713 four into bits 8 .. 11. */
29714 insn &= 0xfffff0f0;
29715 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
29716
29717 /* Update the instruction. */
29718 md_number_to_chars (buf, insn, INSN_SIZE);
29719 }
29720 break;
29721
29722 case BFD_RELOC_ARM_LDC_PC_G0:
29723 case BFD_RELOC_ARM_LDC_PC_G1:
29724 case BFD_RELOC_ARM_LDC_PC_G2:
29725 case BFD_RELOC_ARM_LDC_SB_G0:
29726 case BFD_RELOC_ARM_LDC_SB_G1:
29727 case BFD_RELOC_ARM_LDC_SB_G2:
29728 gas_assert (!fixP->fx_done);
29729 if (!seg->use_rela_p)
29730 {
29731 bfd_vma insn;
29732 bfd_vma addend_abs = llabs (value);
29733
29734 /* Check that the absolute value of the addend is a multiple of
29735 four and, when divided by four, fits in 8 bits. */
29736 if (addend_abs & 0x3)
29737 as_bad_where (fixP->fx_file, fixP->fx_line,
29738 _("bad offset 0x%08lX (must be word-aligned)"),
29739 (unsigned long) addend_abs);
29740
29741 if ((addend_abs >> 2) > 0xff)
29742 as_bad_where (fixP->fx_file, fixP->fx_line,
29743 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
29744 (unsigned long) addend_abs);
29745
29746 /* Extract the instruction. */
29747 insn = md_chars_to_number (buf, INSN_SIZE);
29748
29749 /* If the addend is negative, clear bit 23 of the instruction.
29750 Otherwise set it. */
29751 if (value < 0)
29752 insn &= ~(1 << 23);
29753 else
29754 insn |= 1 << 23;
29755
29756 /* Place the addend (divided by four) into the first eight
29757 bits of the instruction. */
29758 insn &= 0xfffffff0;
29759 insn |= addend_abs >> 2;
29760
29761 /* Update the instruction. */
29762 md_number_to_chars (buf, insn, INSN_SIZE);
29763 }
29764 break;
29765
29766 case BFD_RELOC_THUMB_PCREL_BRANCH5:
29767 if (fixP->fx_addsy
29768 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29769 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29770 && ARM_IS_FUNC (fixP->fx_addsy)
29771 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29772 {
29773 /* Force a relocation for a branch 5 bits wide. */
29774 fixP->fx_done = 0;
29775 }
29776 if (v8_1_branch_value_check (value, 5, FALSE) == FAIL)
29777 as_bad_where (fixP->fx_file, fixP->fx_line,
29778 BAD_BRANCH_OFF);
29779
29780 if (fixP->fx_done || !seg->use_rela_p)
29781 {
29782 addressT boff = value >> 1;
29783
29784 newval = md_chars_to_number (buf, THUMB_SIZE);
29785 newval |= (boff << 7);
29786 md_number_to_chars (buf, newval, THUMB_SIZE);
29787 }
29788 break;
29789
29790 case BFD_RELOC_THUMB_PCREL_BFCSEL:
29791 if (fixP->fx_addsy
29792 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29793 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29794 && ARM_IS_FUNC (fixP->fx_addsy)
29795 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29796 {
29797 fixP->fx_done = 0;
29798 }
29799 if ((value & ~0x7f) && ((value & ~0x3f) != ~0x3f))
29800 as_bad_where (fixP->fx_file, fixP->fx_line,
29801 _("branch out of range"));
29802
29803 if (fixP->fx_done || !seg->use_rela_p)
29804 {
29805 newval = md_chars_to_number (buf, THUMB_SIZE);
29806
29807 addressT boff = ((newval & 0x0780) >> 7) << 1;
29808 addressT diff = value - boff;
29809
29810 if (diff == 4)
29811 {
29812 newval |= 1 << 1; /* T bit. */
29813 }
29814 else if (diff != 2)
29815 {
29816 as_bad_where (fixP->fx_file, fixP->fx_line,
29817 _("out of range label-relative fixup value"));
29818 }
29819 md_number_to_chars (buf, newval, THUMB_SIZE);
29820 }
29821 break;
29822
29823 case BFD_RELOC_ARM_THUMB_BF17:
29824 if (fixP->fx_addsy
29825 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29826 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29827 && ARM_IS_FUNC (fixP->fx_addsy)
29828 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29829 {
29830 /* Force a relocation for a branch 17 bits wide. */
29831 fixP->fx_done = 0;
29832 }
29833
29834 if (v8_1_branch_value_check (value, 17, TRUE) == FAIL)
29835 as_bad_where (fixP->fx_file, fixP->fx_line,
29836 BAD_BRANCH_OFF);
29837
29838 if (fixP->fx_done || !seg->use_rela_p)
29839 {
29840 offsetT newval2;
29841 addressT immA, immB, immC;
29842
29843 immA = (value & 0x0001f000) >> 12;
29844 immB = (value & 0x00000ffc) >> 2;
29845 immC = (value & 0x00000002) >> 1;
29846
29847 newval = md_chars_to_number (buf, THUMB_SIZE);
29848 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29849 newval |= immA;
29850 newval2 |= (immC << 11) | (immB << 1);
29851 md_number_to_chars (buf, newval, THUMB_SIZE);
29852 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29853 }
29854 break;
29855
29856 case BFD_RELOC_ARM_THUMB_BF19:
29857 if (fixP->fx_addsy
29858 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29859 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29860 && ARM_IS_FUNC (fixP->fx_addsy)
29861 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29862 {
29863 /* Force a relocation for a branch 19 bits wide. */
29864 fixP->fx_done = 0;
29865 }
29866
29867 if (v8_1_branch_value_check (value, 19, TRUE) == FAIL)
29868 as_bad_where (fixP->fx_file, fixP->fx_line,
29869 BAD_BRANCH_OFF);
29870
29871 if (fixP->fx_done || !seg->use_rela_p)
29872 {
29873 offsetT newval2;
29874 addressT immA, immB, immC;
29875
29876 immA = (value & 0x0007f000) >> 12;
29877 immB = (value & 0x00000ffc) >> 2;
29878 immC = (value & 0x00000002) >> 1;
29879
29880 newval = md_chars_to_number (buf, THUMB_SIZE);
29881 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29882 newval |= immA;
29883 newval2 |= (immC << 11) | (immB << 1);
29884 md_number_to_chars (buf, newval, THUMB_SIZE);
29885 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29886 }
29887 break;
29888
29889 case BFD_RELOC_ARM_THUMB_BF13:
29890 if (fixP->fx_addsy
29891 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29892 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29893 && ARM_IS_FUNC (fixP->fx_addsy)
29894 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29895 {
29896 /* Force a relocation for a branch 13 bits wide. */
29897 fixP->fx_done = 0;
29898 }
29899
29900 if (v8_1_branch_value_check (value, 13, TRUE) == FAIL)
29901 as_bad_where (fixP->fx_file, fixP->fx_line,
29902 BAD_BRANCH_OFF);
29903
29904 if (fixP->fx_done || !seg->use_rela_p)
29905 {
29906 offsetT newval2;
29907 addressT immA, immB, immC;
29908
29909 immA = (value & 0x00001000) >> 12;
29910 immB = (value & 0x00000ffc) >> 2;
29911 immC = (value & 0x00000002) >> 1;
29912
29913 newval = md_chars_to_number (buf, THUMB_SIZE);
29914 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29915 newval |= immA;
29916 newval2 |= (immC << 11) | (immB << 1);
29917 md_number_to_chars (buf, newval, THUMB_SIZE);
29918 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29919 }
29920 break;
29921
29922 case BFD_RELOC_ARM_THUMB_LOOP12:
29923 if (fixP->fx_addsy
29924 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29925 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29926 && ARM_IS_FUNC (fixP->fx_addsy)
29927 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29928 {
29929 /* Force a relocation for a branch 12 bits wide. */
29930 fixP->fx_done = 0;
29931 }
29932
29933 bfd_vma insn = get_thumb32_insn (buf);
29934 /* le lr, <label>, le <label> or letp lr, <label> */
29935 if (((insn & 0xffffffff) == 0xf00fc001)
29936 || ((insn & 0xffffffff) == 0xf02fc001)
29937 || ((insn & 0xffffffff) == 0xf01fc001))
29938 value = -value;
29939
29940 if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
29941 as_bad_where (fixP->fx_file, fixP->fx_line,
29942 BAD_BRANCH_OFF);
29943 if (fixP->fx_done || !seg->use_rela_p)
29944 {
29945 addressT imml, immh;
29946
29947 immh = (value & 0x00000ffc) >> 2;
29948 imml = (value & 0x00000002) >> 1;
29949
29950 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29951 newval |= (imml << 11) | (immh << 1);
29952 md_number_to_chars (buf + THUMB_SIZE, newval, THUMB_SIZE);
29953 }
29954 break;
29955
29956 case BFD_RELOC_ARM_V4BX:
29957 /* This will need to go in the object file. */
29958 fixP->fx_done = 0;
29959 break;
29960
29961 case BFD_RELOC_UNUSED:
29962 default:
29963 as_bad_where (fixP->fx_file, fixP->fx_line,
29964 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
29965 }
29966 }
29967
/* Translate internal representation of relocation info to BFD target
   format.

   Allocates and returns a fresh arelent describing the fixup FIXP in
   SECTION, mapping GAS's internal fix type (fixp->fx_r_type) onto the
   BFD relocation code that will actually be emitted.  Returns NULL
   after reporting an error for fixups that should have been resolved
   internally (literals, internal immediates) or that cannot be
   represented in the output object format.  */

arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = XNEW (arelent);

  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  /* For PC-relative fixups the addend depends on the relocation
     format: RELA targets carry the PC adjustment in the addend,
     whereas REL targets store the place address instead.  */
  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fix type onto the BFD reloc code.  The first few
     cases fall through so that a PC-relative variant is chosen when
     fx_pcrel is set, otherwise the plain code is used.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These fix types are emitted unchanged.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* EABI v4 and later deprecates R_ARM_THM_PC22-style BLX relocs;
	 use a plain BRANCH23 there instead.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
    case BFD_RELOC_ARM_GOTFUNCDESC:
    case BFD_RELOC_ARM_GOTOFFFUNCDESC:
    case BFD_RELOC_ARM_FUNCDESC:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_GD32_FDPIC:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_IE32_FDPIC:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("%s used for a symbol not defined in the same file"),
		    bfd_get_reloc_code_name (fixp->fx_r_type));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      /* RELA targets can represent this directly; REL targets should
	 have resolved it internally, so reaching here is an error.  */
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Produce a readable name for the diagnostic below.  */
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to _GLOBAL_OFFSET_TABLE_ becomes a GOTPC
     relocation with the place address as its addend.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
30254
30255 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
30256
30257 void
30258 cons_fix_new_arm (fragS * frag,
30259 int where,
30260 int size,
30261 expressionS * exp,
30262 bfd_reloc_code_real_type reloc)
30263 {
30264 int pcrel = 0;
30265
30266 /* Pick a reloc.
30267 FIXME: @@ Should look at CPU word size. */
30268 switch (size)
30269 {
30270 case 1:
30271 reloc = BFD_RELOC_8;
30272 break;
30273 case 2:
30274 reloc = BFD_RELOC_16;
30275 break;
30276 case 4:
30277 default:
30278 reloc = BFD_RELOC_32;
30279 break;
30280 case 8:
30281 reloc = BFD_RELOC_64;
30282 break;
30283 }
30284
30285 #ifdef TE_PE
30286 if (exp->X_op == O_secrel)
30287 {
30288 exp->X_op = O_symbol;
30289 reloc = BFD_RELOC_32_SECREL;
30290 }
30291 #endif
30292
30293 fix_new_exp (frag, where, size, exp, pcrel, reloc);
30294 }
30295
30296 #if defined (OBJ_COFF)
30297 void
30298 arm_validate_fix (fixS * fixP)
30299 {
30300 /* If the destination of the branch is a defined symbol which does not have
30301 the THUMB_FUNC attribute, then we must be calling a function which has
30302 the (interfacearm) attribute. We look for the Thumb entry point to that
30303 function and change the branch to refer to that function instead. */
30304 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
30305 && fixP->fx_addsy != NULL
30306 && S_IS_DEFINED (fixP->fx_addsy)
30307 && ! THUMB_IS_FUNC (fixP->fx_addsy))
30308 {
30309 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
30310 }
30311 }
30312 #endif
30313
30314
/* Decide whether the fixup FIXP must be emitted as a relocation for the
   linker (non-zero) or may be resolved directly by the assembler (zero).  */
int
arm_force_relocation (struct fix * fixp)
{
#if defined (OBJ_COFF) && defined (TE_PE)
  /* RVA values can only be computed at link time.  */
  if (fixp->fx_r_type == BFD_RELOC_RVA)
    return 1;
#endif

  /* In case we have a call or a branch to a function in ARM ISA mode from
     a thumb function or vice-versa force the relocation. These relocations
     are cleared off for some cores that might have blx and simple transformations
     are possible.  */

#ifdef OBJ_ELF
  switch (fixp->fx_r_type)
    {
    /* ARM-state branch/call whose target is a Thumb function: the linker
       must insert an interworking fix-up.  */
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (THUMB_IS_FUNC (fixp->fx_addsy))
	return 1;
      break;

    /* Thumb-state branch/call whose target is an ARM function: likewise.  */
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      if (ARM_IS_FUNC (fixp->fx_addsy))
	return 1;
      break;

    default:
      break;
    }
#endif

  /* Resolve these relocations even if the symbol is extern or weak.
     Technically this is probably wrong due to symbol preemption.
     In practice these relocations do not have enough range to be useful
     at dynamic link time, and some code (e.g. in the Linux kernel)
     expects these references to be resolved.  */
  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
      || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
    return 0;

  /* Always leave these relocations for the linker.  */
  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 1;

  /* Always generate relocations against function symbols.  */
  if (fixp->fx_r_type == BFD_RELOC_32
      && fixp->fx_addsy
      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
    return 1;

  return generic_force_reloc (fixp);
}
30386
30387 #if defined (OBJ_ELF) || defined (OBJ_COFF)
30388 /* Relocations against function names must be left unadjusted,
30389 so that the linker can use this information to generate interworking
30390 stubs. The MIPS version of this function
30391 also prevents relocations that are mips-16 specific, but I do not
30392 know why it does this.
30393
30394 FIXME:
30395 There is one other problem that ought to be addressed here, but
30396 which currently is not: Taking the address of a label (rather
30397 than a function) and then later jumping to that address. Such
30398 addresses also ought to have their bottom bit set (assuming that
30399 they reside in Thumb code), but at the moment they will not. */
30400
30401 bfd_boolean
30402 arm_fix_adjustable (fixS * fixP)
30403 {
30404 if (fixP->fx_addsy == NULL)
30405 return 1;
30406
30407 /* Preserve relocations against symbols with function type. */
30408 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
30409 return FALSE;
30410
30411 if (THUMB_IS_FUNC (fixP->fx_addsy)
30412 && fixP->fx_subsy == NULL)
30413 return FALSE;
30414
30415 /* We need the symbol name for the VTABLE entries. */
30416 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
30417 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
30418 return FALSE;
30419
30420 /* Don't allow symbols to be discarded on GOT related relocs. */
30421 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
30422 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
30423 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
30424 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
30425 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32_FDPIC
30426 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
30427 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
30428 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32_FDPIC
30429 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
30430 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32_FDPIC
30431 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
30432 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
30433 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
30434 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
30435 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
30436 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
30437 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
30438 return FALSE;
30439
30440 /* Similarly for group relocations. */
30441 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
30442 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
30443 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
30444 return FALSE;
30445
30446 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
30447 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
30448 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
30449 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
30450 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
30451 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
30452 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
30453 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
30454 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
30455 return FALSE;
30456
30457 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
30458 offsets, so keep these symbols. */
30459 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
30460 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
30461 return FALSE;
30462
30463 return TRUE;
30464 }
30465 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
30466
30467 #ifdef OBJ_ELF
30468 const char *
30469 elf32_arm_target_format (void)
30470 {
30471 #ifdef TE_SYMBIAN
30472 return (target_big_endian
30473 ? "elf32-bigarm-symbian"
30474 : "elf32-littlearm-symbian");
30475 #elif defined (TE_VXWORKS)
30476 return (target_big_endian
30477 ? "elf32-bigarm-vxworks"
30478 : "elf32-littlearm-vxworks");
30479 #elif defined (TE_NACL)
30480 return (target_big_endian
30481 ? "elf32-bigarm-nacl"
30482 : "elf32-littlearm-nacl");
30483 #else
30484 if (arm_fdpic)
30485 {
30486 if (target_big_endian)
30487 return "elf32-bigarm-fdpic";
30488 else
30489 return "elf32-littlearm-fdpic";
30490 }
30491 else
30492 {
30493 if (target_big_endian)
30494 return "elf32-bigarm";
30495 else
30496 return "elf32-littlearm";
30497 }
30498 #endif
30499 }
30500
/* Per-symbol hook run at write-out time; ARM has no extra work to do
   beyond the generic ELF symbol frobbing.  */
void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
30507 #endif
30508
30509 /* MD interface: Finalization. */
30510
30511 void
30512 arm_cleanup (void)
30513 {
30514 literal_pool * pool;
30515
30516 /* Ensure that all the predication blocks are properly closed. */
30517 check_pred_blocks_finished ();
30518
30519 for (pool = list_of_pools; pool; pool = pool->next)
30520 {
30521 /* Put it at the end of the relevant section. */
30522 subseg_set (pool->section, pool->sub_section);
30523 #ifdef OBJ_ELF
30524 arm_elf_change_section ();
30525 #endif
30526 s_ltorg (0);
30527 }
30528 }
30529
30530 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Sections with no frag chain have nothing to clean up.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* Last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Walk forward through empty frags to decide whether SYM is
	 redundant (superseded by a later mapping symbol, or dangling
	 at the end of the section).  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
30595 #endif
30596
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  COFF encodes the distinction in the storage class; ELF in
   st_target_internal / st_info.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (	 S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    /* Non-function Thumb symbols: map each storage class onto
	       its Thumb-flavoured equivalent.  */
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a, $t, $d, ...) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
30678
30679 /* MD interface: Initialization. */
30680
30681 static void
30682 set_constant_flonums (void)
30683 {
30684 int i;
30685
30686 for (i = 0; i < NUM_FLOAT_VALS; i++)
30687 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
30688 abort ();
30689 }
30690
/* Auto-select Thumb mode if it's the only available instruction set for the
   given architecture (i.e. the CPU lacks the ARM v1 base instruction set,
   as on M-profile cores).  */

static void
autoselect_thumb_from_cpu_variant (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
30700
/* MD hook: one-time assembler initialization.  Builds the lookup hash
   tables, resolves the CPU/FPU selection from command-line options,
   records object-file flags and sets the BFD architecture/machine.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the hash tables used while parsing: opcodes, condition
     codes, shifts, PSR names, registers, relocation modifiers and
     barrier options.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_vcond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate each table from its static description array.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (vconds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_vcond_hsh, vconds[i].template_name, (void *) (vconds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      selected_arch = *legacy_cpu;
    }
  else if (mcpu_cpu_opt)
    {
      selected_arch = *mcpu_cpu_opt;
      selected_ext = *mcpu_ext_opt;
    }
  else if (march_cpu_opt)
    {
      selected_arch = *march_cpu_opt;
      selected_ext = *march_ext_opt;
    }
  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      selected_fpu = *legacy_fpu;
    }
  else if (mfpu_opt)
    selected_fpu = *mfpu_opt;
  else
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	selected_fpu = *mcpu_fpu_opt;
      else if (march_fpu_opt)
	selected_fpu = *march_fpu_opt;
#else
      selected_fpu = fpu_default;
#endif
    }

  /* Still no FPU: fall back to the per-target default, or plain FPA if
     no CPU was selected either.  */
  if (ARM_FEATURE_ZERO (selected_fpu))
    {
      if (!no_cpu_selected ())
	selected_fpu = fpu_default;
      else
	selected_fpu = fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (ARM_FEATURE_ZERO (selected_arch))
    {
      selected_arch = cpu_default;
      selected_cpu = selected_arch;
    }
  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#else
  /* Autodetection of feature mode: allow all features in cpu_variant but leave
     selected_cpu unset.  It will be set in aeabi_set_public_attributes ()
     after all instructions have been processed and we can decide what CPU
     should be selected.  */
  if (ARM_FEATURE_ZERO (selected_arch))
    ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
  else
    ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#endif

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.  */
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.  */
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags (sec, SEC_READONLY | SEC_DEBUGGING);
	    bfd_set_section_size (sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Pick the most specific machine the
     selected feature set supports.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
30939
30940 /* Command line processing. */
30941
30942 /* md_parse_option
30943 Invocation line includes a switch not recognized by the base assembler.
30944 See if it's a processor-specific option.
30945
30946 This routine is somewhat complicated by the need for backwards
30947 compatibility (since older releases of gcc can't be changed).
30948 The new options try to make the interface as compatible as
30949 possible with GCC.
30950
30951 New options (supported) are:
30952
30953 -mcpu=<cpu name> Assemble for selected processor
30954 -march=<architecture name> Assemble for selected architecture
30955 -mfpu=<fpu architecture> Assemble for selected FPU.
30956 -EB/-mbig-endian Big-endian
30957 -EL/-mlittle-endian Little-endian
30958 -k Generate PIC code
30959 -mthumb Start in Thumb mode
30960 -mthumb-interwork Code supports ARM/Thumb interworking
30961
30962 -m[no-]warn-deprecated Warn about deprecated features
30963 -m[no-]warn-syms Warn when symbols match instructions
30964
30965 For now we will also provide support for:
30966
30967 -mapcs-32 32-bit Program counter
30968 -mapcs-26 26-bit Program counter
   -mapcs-float		  Floats passed in FP registers
30970 -mapcs-reentrant Reentrant code
30971 -matpcs
30972 (sometime these will probably be replaced with -mapcs=<list of options>
30973 and -matpcs=<list of options>)
30974
   The remaining options are only supported for backwards compatibility.
30976 Cpu variants, the arm part is optional:
30977 -m[arm]1 Currently not supported.
30978 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
30979 -m[arm]3 Arm 3 processor
30980 -m[arm]6[xx], Arm 6 processors
30981 -m[arm]7[xx][t][[d]m] Arm 7 processors
30982 -m[arm]8[10] Arm 8 processors
30983 -m[arm]9[20][tdmi] Arm 9 processors
30984 -mstrongarm[110[0]] StrongARM processors
30985 -mxscale XScale processors
30986 -m[arm]v[2345[t[e]]] Arm architectures
30987 -mall All (except the ARM1)
30988 FP variants:
30989 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
30990 -mfpe-old (No float load/store multiples)
30991 -mvfpxd VFP Single precision
30992 -mvfp All VFP
30993 -mno-fpu Disable all floating point instructions
30994
30995 The following CPU names are recognized:
30996 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
30997 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
30998 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
30999 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
31000 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
31001 arm10t arm10e, arm1020t, arm1020e, arm10200e,
31002 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
31003
31004 */
31005
/* Short options: -m<arg> takes an argument (CPU/feature selection);
   -k is a flag requesting PIC code.  */
const char * md_shortopts = "m:k";
31007
31008 #ifdef ARM_BI_ENDIAN
31009 #define OPTION_EB (OPTION_MD_BASE + 0)
31010 #define OPTION_EL (OPTION_MD_BASE + 1)
31011 #else
31012 #if TARGET_BYTES_BIG_ENDIAN
31013 #define OPTION_EB (OPTION_MD_BASE + 0)
31014 #else
31015 #define OPTION_EL (OPTION_MD_BASE + 1)
31016 #endif
31017 #endif
31018 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
31019 #define OPTION_FDPIC (OPTION_MD_BASE + 3)
31020
/* Long options recognized by md_parse_option; terminated by an
   all-zero sentinel entry as required by getopt_long.  Endianness
   options are only present when the target supports them.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
#ifdef OBJ_ELF
  {"fdpic", no_argument, NULL, OPTION_FDPIC},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
31037
/* Describes one simple on/off command-line option: when OPTION matches,
   *VAR is set to VALUE (and DEPRECATED, if set, is printed).  */
struct arm_option_table
{
  const char *  option;		/* Option name to match.  */
  const char *  help;		/* Help information.  */
  int *         var;		/* Variable to change.	*/
  int	        value;		/* What to change it to.  */
  const char *  deprecated;	/* If non-null, print this message.  */
};
31046
31047 struct arm_option_table arm_opts[] =
31048 {
31049 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
31050 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
31051 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
31052 &support_interwork, 1, NULL},
31053 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
31054 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
31055 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
31056 1, NULL},
31057 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
31058 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
31059 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
31060 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
31061 NULL},
31062
31063 /* These are recognized by the assembler, but have no affect on code. */
31064 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
31065 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
31066
31067 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
31068 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
31069 &warn_on_deprecated, 0, NULL},
31070
31071 {"mwarn-restrict-it", N_("warn about performance deprecated IT instructions"
31072 " in ARMv8-A and ARMv8-R"), &warn_on_restrict_it, 1, NULL},
31073 {"mno-warn-restrict-it", NULL, &warn_on_restrict_it, 0, NULL},
31074
31075 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
31076 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
31077 {NULL, NULL, NULL, 0, NULL}
31078 };
31079
/* Describes one legacy feature-set option (e.g. -marm7tdmi): when OPTION
   matches, *VAR is pointed at VALUE and the DEPRECATED message names the
   modern -mcpu=/-march= replacement.  */
struct arm_legacy_option_table
{
  const char *              option;	/* Option name to match.  */
  const arm_feature_set **  var;	/* Variable to change.	*/
  const arm_feature_set     value;	/* What to change it to.  */
  const char *              deprecated;	/* If non-null, print this message.  */
};
31087
31088 const struct arm_legacy_option_table arm_legacy_opts[] =
31089 {
31090 /* DON'T add any new processors to this list -- we want the whole list
31091 to go away... Add them to the processors table instead. */
31092 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
31093 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
31094 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
31095 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
31096 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
31097 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
31098 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
31099 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
31100 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
31101 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
31102 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
31103 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
31104 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
31105 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
31106 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
31107 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
31108 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
31109 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
31110 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
31111 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
31112 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
31113 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
31114 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
31115 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
31116 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
31117 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
31118 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
31119 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
31120 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
31121 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
31122 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
31123 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
31124 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
31125 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
31126 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
31127 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
31128 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
31129 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
31130 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
31131 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
31132 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
31133 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
31134 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
31135 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
31136 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
31137 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
31138 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
31139 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
31140 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
31141 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
31142 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
31143 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
31144 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
31145 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
31146 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
31147 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
31148 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
31149 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
31150 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
31151 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
31152 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
31153 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
31154 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
31155 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
31156 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
31157 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
31158 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
31159 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
31160 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
31161 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
31162 N_("use -mcpu=strongarm110")},
31163 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
31164 N_("use -mcpu=strongarm1100")},
31165 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
31166 N_("use -mcpu=strongarm1110")},
31167 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
31168 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
31169 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
31170
31171 /* Architecture variants -- don't add any more to this list either. */
31172 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
31173 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
31174 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
31175 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
31176 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
31177 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
31178 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
31179 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
31180 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
31181 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
31182 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
31183 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
31184 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
31185 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
31186 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
31187 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
31188 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
31189 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
31190
31191 /* Floating point variants -- don't add any more to this list either. */
31192 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
31193 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
31194 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
31195 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
31196 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
31197
31198 {NULL, NULL, ARM_ARCH_NONE, NULL}
31199 };
31200
/* One entry per CPU name accepted by -mcpu=<name>.  */
struct arm_cpu_option_table
{
  const char * name;
  /* Length of NAME, precomputed so lookups need not call strlen.  */
  size_t name_len;
  /* Architectural feature bits implied by this CPU.  */
  const arm_feature_set value;
  /* Additional extension feature bits this CPU provides.  */
  const arm_feature_set ext;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...  */
  const arm_feature_set default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char * canonical_name;
};
31214
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }

/* CPUs selectable with -mcpu=<name>.  Columns: name, canonical name (NULL
   => uppercase NAME), architecture features, extension features, and the
   FPU assumed when no explicit -mfpu= is given.  */
static const struct arm_cpu_option_table arm_cpus[] =
{
  /* Legacy (pre-V5) cores; these all default to the FPA FPU.  */
  ARM_CPU_OPT ("all", NULL, ARM_ANY, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm1", NULL, ARM_ARCH_V1, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm2", NULL, ARM_ARCH_V2, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm250", NULL, ARM_ARCH_V2S, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm3", NULL, ARM_ARCH_V2S, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm6", NULL, ARM_ARCH_V3, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm60", NULL, ARM_ARCH_V3, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm600", NULL, ARM_ARCH_V3, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm610", NULL, ARM_ARCH_V3, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm620", NULL, ARM_ARCH_V3, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7", NULL, ARM_ARCH_V3, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7m", NULL, ARM_ARCH_V3M, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7d", NULL, ARM_ARCH_V3, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dm", NULL, ARM_ARCH_V3M, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7di", NULL, ARM_ARCH_V3, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dmi", NULL, ARM_ARCH_V3M, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm70", NULL, ARM_ARCH_V3, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700", NULL, ARM_ARCH_V3, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700i", NULL, ARM_ARCH_V3, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710", NULL, ARM_ARCH_V3, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710t", NULL, ARM_ARCH_V4T, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720", NULL, ARM_ARCH_V3, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720t", NULL, ARM_ARCH_V4T, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm740t", NULL, ARM_ARCH_V4T, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710c", NULL, ARM_ARCH_V3, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7100", NULL, ARM_ARCH_V3, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500", NULL, ARM_ARCH_V3, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500fe", NULL, ARM_ARCH_V3, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7t", NULL, ARM_ARCH_V4T, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi", NULL, ARM_ARCH_V4T, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi-s", NULL, ARM_ARCH_V4T, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm8", NULL, ARM_ARCH_V4, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm810", NULL, ARM_ARCH_V4, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm", NULL, ARM_ARCH_V4, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1", NULL, ARM_ARCH_V4, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm110", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1100", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1110", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9", NULL, ARM_ARCH_V4T, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T,
	       ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920t", NULL, ARM_ARCH_V4T, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm922t", NULL, ARM_ARCH_V4T, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm940t", NULL, ARM_ARCH_V4T, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9tdmi", NULL, ARM_ARCH_V4T, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa526", NULL, ARM_ARCH_V4, ARM_ARCH_NONE, FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa626", NULL, ARM_ARCH_V4, ARM_ARCH_NONE, FPU_ARCH_FPA),

  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0", NULL, ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm9e", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej-s", NULL, ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-r0", NULL, ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-s", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-r0", NULL, ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-s", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm968e-s", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm10t", NULL, ARM_ARCH_V5T,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10tdmi", NULL, ARM_ARCH_V5T,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10e", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020t", NULL, ARM_ARCH_V5T,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm1020e", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1022e", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ej-s", NULL, ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa606te", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa616te", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa626te", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fmp626", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa726te", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6,
	       ARM_ARCH_NONE, FPU_NONE),
  ARM_CPU_OPT ("arm1136j-s", NULL, ARM_ARCH_V6,
	       ARM_ARCH_NONE, FPU_NONE),
  ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136jf-s", NULL, ARM_ARCH_V6,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K,
	       ARM_ARCH_NONE, FPU_NONE),
  ARM_CPU_OPT ("arm1156t2-s", NULL, ARM_ARCH_V6T2,
	       ARM_ARCH_NONE, FPU_NONE),
  ARM_CPU_OPT ("arm1156t2f-s", NULL, ARM_ARCH_V6T2,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1176jz-s", NULL, ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE, FPU_NONE),
  ARM_CPU_OPT ("arm1176jzf-s", NULL, ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a76", "Cortex-A76", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a76ae", "Cortex-A76AE", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a77", "Cortex-A77", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("ares", "Ares", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
	       FPU_ARCH_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-m35p", "Cortex-M35P", ARM_ARCH_V8M_MAIN,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("neoverse-n1", "Neoverse N1", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale", NULL, ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt", NULL, ARM_ARCH_IWMMXT,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("iwmmxt2", NULL, ARM_ARCH_IWMMXT2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("i80200", NULL, ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* Maverick.  */
  ARM_CPU_OPT ("ep9312", "ARM920T",
	       ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
	       ARM_ARCH_NONE, FPU_ARCH_MAVERICK),

  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4", NULL, ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("marvell-whitney", NULL, ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_NEON_VFP_V4),

  /* APM X-Gene family.  */
  ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A,
	       ARM_ARCH_NONE,
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),

  /* Sentinel.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
31618
/* One entry per architecture-extension name: +<name> merges MERGE into the
   selected feature set, +no<name> removes CLEAR from it.  */
struct arm_ext_table
{
  const char * name;
  /* Length of NAME, precomputed so lookups need not call strlen.  */
  size_t name_len;
  const arm_feature_set merge;
  const arm_feature_set clear;
};
31626
/* One entry per architecture name accepted by -march=<name>.  */
struct arm_arch_option_table
{
  const char * name;
  /* Length of NAME, precomputed so lookups need not call strlen.  */
  size_t name_len;
  /* Feature bits implied by this architecture.  */
  const arm_feature_set value;
  /* FPU assumed when no explicit -mfpu= is given.  */
  const arm_feature_set default_fpu;
  /* Table of +<ext> extensions valid for this architecture, or NULL if the
     architecture takes no extensions.  */
  const struct arm_ext_table * ext_table;
};
31635
/* Helpers for building struct arm_ext_table entries.  All three normalize
   on the GNU "sizeof (E) - 1" spelling used by the other entry-builder
   macros in this file (ARM_CPU_OPT, ARM_ARCH_OPT).  */

/* Used to add support for +E and +noE extensions: merge M for +E, clear C
   for +noE.  */
#define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
/* Used to add support for a +E extension.  */
#define ARM_ADD(E, M) { E, sizeof (E) - 1, M, ARM_ARCH_NONE }
/* Used to add support for a +noE extension.  */
#define ARM_REMOVE(E, C) { E, sizeof (E) - 1, ARM_ARCH_NONE, C }

/* Mask of every FP/SIMD coprocessor feature bit except the FPU endianness
   bit, plus the FP16 instruction bits; used as the "clear" set when an
   extension table disables FP (e.g. ARM_REMOVE ("fp", ALL_FP)).  */
#define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
			    ~0 & ~FPU_ENDIAN_PURE)
31645
/* Extensions accepted for -march=armv5te (also reused by the ARMv6-family
   entries in arm_archs below).  */
static const struct arm_ext_table armv5te_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V2, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
31651
/* Extensions accepted for -march=armv7.  */
static const struct arm_ext_table armv7_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
31657
/* Extensions accepted for -march=armv7ve.  */
static const struct arm_ext_table armv7ve_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),	/* Alias for +fp.  */
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.  */
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
31680
/* Extensions accepted for -march=armv7-a (and the non-dashed armv7a).  */
static const struct arm_ext_table armv7a_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),	/* Alias for +fp.  */
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.  */
  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),

  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  /* Multiprocessing and Security extensions.  */
  ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP)),
  ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
31705
/* Extensions accepted for -march=armv7-r (and the non-dashed armv7r).  */
static const struct arm_ext_table armv7r_ext_table[] =
{
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD),
  ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD),	/* Alias for +fp.sp.  */
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),	/* Alias for +fp.  */
  ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  /* +idiv/+noidiv toggle both the ARM-state and Thumb-state divide bits.  */
  ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
31718
/* Extensions accepted for -march=armv7e-m.  */
static const struct arm_ext_table armv7em_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16, ALL_FP),
  /* Alias for +fp, used to be known as fpv4-sp-d16.  */
  ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16),
  ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
31729
/* Extensions accepted for -march=armv8-a.  */
static const struct arm_ext_table armv8a_ext_table[] =
{
  ARM_ADD ("crc", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC)),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
31744
31745
/* Extensions accepted for -march=armv8.1-a.  */
static const struct arm_ext_table armv81a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
31759
/* Extensions accepted for -march=armv8.2-a (also reused for armv8.3-a in
   arm_archs below).  */
static const struct arm_ext_table armv82a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16),
  ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML),
  ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
31778
/* Extensions accepted for -march=armv8.4-a.  */
static const struct arm_ext_table armv84a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
31795
/* Extensions accepted for -march=armv8.5-a.  Note: +sb and +predres are
   absent here because Armv8.5-A includes those features in the base
   architecture -- TODO(review) confirm against include/opcode/arm.h.  */
static const struct arm_ext_table armv85a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
31810
/* Extensions accepted for -march=armv8.6-a.  */
static const struct arm_ext_table armv86a_ext_table[] =
{
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
31816
/* Custom Datapath Extension: one +cdecpN option per coprocessor number
   0-7.  Each option sets the generic CDE bit plus the bit for that
   coprocessor.  Shared by the Armv8-M and Armv8.1-M mainline tables.  */
#define CDE_EXTENSIONS \
	ARM_ADD ("cdecp0", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE0)), \
	ARM_ADD ("cdecp1", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE1)), \
	ARM_ADD ("cdecp2", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE2)), \
	ARM_ADD ("cdecp3", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE3)), \
	ARM_ADD ("cdecp4", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE4)), \
	ARM_ADD ("cdecp5", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE5)), \
	ARM_ADD ("cdecp6", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE6)), \
	ARM_ADD ("cdecp7", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE7))
31826
/* Extensions accepted for -march=armv8-m.main.  */
static const struct arm_ext_table armv8m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP),
	   ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP)),
  ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16, ALL_FP),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  CDE_EXTENSIONS,
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
31836
31837
/* Extensions accepted for -march=armv8.1-m.main.  */
static const struct arm_ext_table armv8_1m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP),
	   ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP)),
  /* Scalar FP here always brings the FP16 instructions with it.  */
  ARM_EXT ("fp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA),
	   ALL_FP),
  ARM_ADD ("fp.dp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  /* +nomve clears both the integer and floating-point MVE bits.  */
  ARM_EXT ("mve", ARM_FEATURE (ARM_AEXT_V8M_MAIN_DSP, ARM_EXT2_MVE, 0),
	   ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE | ARM_EXT2_MVE_FP)),
  ARM_ADD ("mve.fp",
	   ARM_FEATURE (ARM_AEXT_V8M_MAIN_DSP,
			ARM_EXT2_FP16_INST | ARM_EXT2_MVE | ARM_EXT2_MVE_FP,
			FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  CDE_EXTENSIONS,
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};

#undef CDE_EXTENSIONS
31860
/* Extensions accepted for -march=armv8-r.  */
static const struct arm_ext_table armv8r_ext_table[] =
{
  ARM_ADD ("crc", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC)),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
31871
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
/* Entry builders: ARM_ARCH_OPT for architectures without +<ext> options,
   ARM_ARCH_OPT2 for those with an extension table named <ext>_ext_table.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
#define ARM_ARCH_OPT2(N, V, DF, ext) \
  { N, sizeof (N) - 1, V, DF, ext##_ext_table }

/* Architectures selectable with -march=<name>.  */
static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP, armv5te),
  /* The ARMv6 family reuses the armv5te extension table.  */
  ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP, armv5te),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP, armv5te),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP, armv7),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
  ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP, armv7ve),
  ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
  ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
  ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
  ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP, armv7em),
  ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP,
		 armv8m_main),
  ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN, FPU_ARCH_VFP,
		 armv8_1m_main),
  ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP, armv8a),
  ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP, armv81a),
  ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP, armv82a),
  /* armv8.3-a deliberately reuses the armv8.2-a extension table.  */
  ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A, FPU_ARCH_VFP, armv82a),
  ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R, FPU_ARCH_VFP, armv8r),
  ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A, FPU_ARCH_VFP, armv84a),
  ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A, FPU_ARCH_VFP, armv85a),
  ARM_ARCH_OPT2 ("armv8.6-a", ARM_ARCH_V8_6A, FPU_ARCH_VFP, armv86a),
  ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2, FPU_ARCH_VFP),
  /* Sentinel.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_ARCH_OPT
31944
31945 /* ISA extensions in the co-processor and main instruction set space. */
31946
struct arm_option_extension_value_table
{
  /* Extension name as written on the command line (without any "no"
     prefix; that prefix is stripped before matching).  */
  const char * name;
  /* strlen (NAME), precomputed so lookups can compare lengths first.  */
  size_t name_len;
  /* Features merged into the selected set when the extension is added.  */
  const arm_feature_set merge_value;
  /* Features cleared from the selected set when the extension is removed.  */
  const arm_feature_set clear_value;
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set allowed_archs[2];
};
31958
31959 /* The following table must be in alphabetical order with a NULL last entry. */
31960
31961 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
31962 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
31963
31964 /* DEPRECATED: Refrain from using this table to add any new extensions, instead
31965 use the context sensitive approach using arm_ext_table's. */
static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc",	 ARM_FEATURE_CORE_HIGH(ARM_EXT2_CRC),
			 ARM_FEATURE_CORE_HIGH(ARM_EXT2_CRC),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
			  ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
			  ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("dsp",	ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
  ARM_EXT_OPT ("fp",     FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp16",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("fp16fml",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						  | ARM_EXT2_FP16_FML),
			   ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						  | ARM_EXT2_FP16_FML),
			   ARM_ARCH_V8_2A),
  ARM_EXT_OPT2 ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
     Thumb divide instruction.  Due to this having the same name as the
     previous entry, this will be ignored when doing command-line parsing and
     only considered by build attribute selection code.  */
  ARM_EXT_OPT ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
			  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
			   ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
  ARM_EXT_OPT2 ("mp",	ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("os",	ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
			ARM_ARCH_V8A),
  ARM_EXT_OPT ("ras",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("rdma",  FPU_ARCH_NEON_VFP_ARMV8_1,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("sb",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
			ARM_ARCH_V8A),
  ARM_EXT_OPT2 ("sec",	ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("simd",  FPU_ARCH_NEON_VFP_ARMV8,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("virt",	ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
				     | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
  /* Sentinel: arm_parse_extension and the build attribute code stop at the
     first NULL name.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
};
32044 #undef ARM_EXT_OPT
32045
32046 /* ISA floating-point and Advanced SIMD extensions. */
struct arm_option_fpu_value_table
{
  /* FPU name as accepted by -mfpu=.  */
  const char * name;
  /* Feature set selected for this FPU (stored via mfpu_opt).  */
  const arm_feature_set value;
};
32052
32053 /* This list should, at a minimum, contain all the fpu names
32054 recognized by GCC. */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3",	FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  /* Sentinel.  */
  {NULL,		ARM_ARCH_NONE}
};
32103
/* Generic name -> integer-value mapping used for the -mfloat-abi= and
   -meabi= option tables below.  */
struct arm_option_value_table
{
  /* Value name as written on the command line.  */
  const char *name;
  /* Associated constant (an ARM_FLOAT_ABI_* or EF_ARM_EABI_* value).  */
  long value;
};
32109
/* Recognised arguments for -mfloat-abi=.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  /* Sentinel.  */
  {NULL,	0}
};
32117
32118 #ifdef OBJ_ELF
32119 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  /* Sentinel.  */
  {NULL,	0}
};
32127 #endif
32128
/* A long-form command-line option (e.g. "-mcpu=...") whose argument is
   parsed by a dedicated sub-option function.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.  */
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
32136
/* Parse STR, a sequence of "+ext" / "+noext" architectural extension items,
   and apply each one to *EXT_SET.  *OPT_SET is the feature set of the base
   CPU/architecture and is used to check that an extension is valid for it.
   EXT_TABLE, when non-NULL, is a context-sensitive (per-architecture)
   extension table that is searched before the legacy global ARM_EXTENSIONS
   table.  Returns TRUE on success; on error a diagnostic is issued and
   FALSE is returned.  */
static bfd_boolean
arm_parse_extension (const char *str, const arm_feature_set *opt_set,
		     arm_feature_set *ext_set,
		     const struct arm_ext_table *ext_table)
{
  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  const arm_feature_set arm_any = ARM_ANY;
  int adding_value = -1;

  while (str != NULL && *str != 0)
    {
      const char *ext;
      size_t len;

      /* Each extension item must begin with '+'.  */
      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      /* LEN is the length of the current item, up to the next '+'.  */
      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      if (len >= 2 && strncmp (str, "no", 2) == 0)
	{
	  /* "no<ext>" switches to removal mode; restart the alphabetical
	     scan from the top of the table.  */
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return FALSE;
	    }
	}

      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* Try the context-sensitive, per-architecture table first.  */
      if (ext_table != NULL)
	{
	  const struct arm_ext_table * ext_opt = ext_table;
	  bfd_boolean found = FALSE;
	  for (; ext_opt->name != NULL; ext_opt++)
	    if (ext_opt->name_len == len
		&& strncmp (ext_opt->name, str, len) == 0)
	      {
		if (adding_value)
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->merge))
			/* TODO: Option not supported.  When we remove the
			   legacy table this case should error out.  */
			continue;

		    ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, ext_opt->merge);
		  }
		else
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->clear))
			/* TODO: Option not supported.  When we remove the
			   legacy table this case should error out.  */
			continue;
		    ARM_CLEAR_FEATURE (*ext_set, *ext_set, ext_opt->clear);
		  }
		found = TRUE;
		break;
	      }
	  if (found)
	    {
	      str = ext;
	      continue;
	    }
	}

      /* Scan over the options table trying to find an exact match.  */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    int i, nb_allowed_archs =
	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
	    /* Check we can apply the extension to this architecture.  */
	    for (i = 0; i < nb_allowed_archs; i++)
	      {
		/* Empty entry.  */
		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
		  continue;
		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
		  break;
	      }
	    if (i == nb_allowed_archs)
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return FALSE;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);

	    /* Allowing Thumb division instructions for ARMv7 in autodetection
	       rely on this break so that duplicate extensions (extensions
	       with the same name as a previous extension in the list) are not
	       considered for command-line parsing.  */
	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return FALSE;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      str = ext;
    };

  return TRUE;
}
32304
32305 static bfd_boolean
32306 arm_parse_fp16_opt (const char *str)
32307 {
32308 if (strcasecmp (str, "ieee") == 0)
32309 fp16_format = ARM_FP16_FORMAT_IEEE;
32310 else if (strcasecmp (str, "alternative") == 0)
32311 fp16_format = ARM_FP16_FORMAT_ALTERNATIVE;
32312 else
32313 {
32314 as_bad (_("unrecognised float16 format \"%s\""), str);
32315 return FALSE;
32316 }
32317
32318 return TRUE;
32319 }
32320
/* Decode the argument of -mcpu=.  STR is a CPU name optionally followed by
   "+ext" extension items.  On success sets MCPU_CPU_OPT, MCPU_EXT_OPT,
   MCPU_FPU_OPT and SELECTED_CPU_NAME and returns TRUE; otherwise issues a
   diagnostic and returns FALSE.  */
static bfd_boolean
arm_parse_cpu (const char *str)
{
  const struct arm_cpu_option_table *opt;
  const char *ext = strchr (str, '+');
  size_t len;

  /* Only the part before the first '+' names the CPU.  */
  if (ext != NULL)
    len = ext - str;
  else
    len = strlen (str);

  if (len == 0)
    {
      as_bad (_("missing cpu name `%s'"), str);
      return FALSE;
    }

  for (opt = arm_cpus; opt->name != NULL; opt++)
    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	/* Lazily allocate the extension set on first use.  */
	if (mcpu_ext_opt == NULL)
	  mcpu_ext_opt = XNEW (arm_feature_set);
	*mcpu_ext_opt = opt->ext;
	mcpu_fpu_opt = &opt->default_fpu;
	if (opt->canonical_name)
	  {
	    gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
	    strcpy (selected_cpu_name, opt->canonical_name);
	  }
	else
	  {
	    size_t i;

	    /* No canonical name: use an upper-cased, possibly truncated,
	       copy of the name the user gave.  */
	    if (len >= sizeof selected_cpu_name)
	      len = (sizeof selected_cpu_name) - 1;

	    for (i = 0; i < len; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }

	/* Hand any "+ext" suffix to the extension parser.  */
	if (ext != NULL)
	  return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt, NULL);

	return TRUE;
      }

  as_bad (_("unknown cpu `%s'"), str);
  return FALSE;
}
32373
32374 static bfd_boolean
32375 arm_parse_arch (const char *str)
32376 {
32377 const struct arm_arch_option_table *opt;
32378 const char *ext = strchr (str, '+');
32379 size_t len;
32380
32381 if (ext != NULL)
32382 len = ext - str;
32383 else
32384 len = strlen (str);
32385
32386 if (len == 0)
32387 {
32388 as_bad (_("missing architecture name `%s'"), str);
32389 return FALSE;
32390 }
32391
32392 for (opt = arm_archs; opt->name != NULL; opt++)
32393 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
32394 {
32395 march_cpu_opt = &opt->value;
32396 if (march_ext_opt == NULL)
32397 march_ext_opt = XNEW (arm_feature_set);
32398 *march_ext_opt = arm_arch_none;
32399 march_fpu_opt = &opt->default_fpu;
32400 selected_ctx_ext_table = opt->ext_table;
32401 strcpy (selected_cpu_name, opt->name);
32402
32403 if (ext != NULL)
32404 return arm_parse_extension (ext, march_cpu_opt, march_ext_opt,
32405 opt->ext_table);
32406
32407 return TRUE;
32408 }
32409
32410 as_bad (_("unknown architecture `%s'\n"), str);
32411 return FALSE;
32412 }
32413
32414 static bfd_boolean
32415 arm_parse_fpu (const char * str)
32416 {
32417 const struct arm_option_fpu_value_table * opt;
32418
32419 for (opt = arm_fpus; opt->name != NULL; opt++)
32420 if (streq (opt->name, str))
32421 {
32422 mfpu_opt = &opt->value;
32423 return TRUE;
32424 }
32425
32426 as_bad (_("unknown floating point format `%s'\n"), str);
32427 return FALSE;
32428 }
32429
32430 static bfd_boolean
32431 arm_parse_float_abi (const char * str)
32432 {
32433 const struct arm_option_value_table * opt;
32434
32435 for (opt = arm_float_abis; opt->name != NULL; opt++)
32436 if (streq (opt->name, str))
32437 {
32438 mfloat_abi_opt = opt->value;
32439 return TRUE;
32440 }
32441
32442 as_bad (_("unknown floating point abi `%s'\n"), str);
32443 return FALSE;
32444 }
32445
32446 #ifdef OBJ_ELF
32447 static bfd_boolean
32448 arm_parse_eabi (const char * str)
32449 {
32450 const struct arm_option_value_table *opt;
32451
32452 for (opt = arm_eabis; opt->name != NULL; opt++)
32453 if (streq (opt->name, str))
32454 {
32455 meabi_flags = opt->value;
32456 return TRUE;
32457 }
32458 as_bad (_("unknown EABI `%s'\n"), str);
32459 return FALSE;
32460 }
32461 #endif
32462
32463 static bfd_boolean
32464 arm_parse_it_mode (const char * str)
32465 {
32466 bfd_boolean ret = TRUE;
32467
32468 if (streq ("arm", str))
32469 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
32470 else if (streq ("thumb", str))
32471 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
32472 else if (streq ("always", str))
32473 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
32474 else if (streq ("never", str))
32475 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
32476 else
32477 {
32478 as_bad (_("unknown implicit IT mode `%s', should be "\
32479 "arm, thumb, always, or never."), str);
32480 ret = FALSE;
32481 }
32482
32483 return ret;
32484 }
32485
32486 static bfd_boolean
32487 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
32488 {
32489 codecomposer_syntax = TRUE;
32490 arm_comment_chars[0] = ';';
32491 arm_line_separator_chars[0] = 0;
32492 return TRUE;
32493 }
32494
/* Long-form options recognised by md_parse_option.  Each OPTION string is
   matched as a prefix of the command-line argument and the remainder is
   passed to the entry's FUNC.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {"mfp16-format=",
   N_("[ieee|alternative]\n\
                          set the encoding for half precision floating point "
			  "numbers to IEEE\n\
                          or Arm alternative format."),
   arm_parse_fp16_opt, NULL },
  /* Sentinel.  */
  {NULL, NULL, 0, NULL}
};
32521
/* Handle command-line option C (with argument ARG, which may be NULL).
   A few options are handled directly; everything else is looked up in the
   ARM_OPTS, ARM_LEGACY_OPTS and ARM_LONG_OPTS tables in turn.  Returns 1
   if the option was recognised, 0 otherwise.  */
int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

#ifdef OBJ_ELF
    case OPTION_FDPIC:
      arm_fdpic = TRUE;
      break;
#endif /* OBJ_ELF */

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Simple flag options: "-<letter>" or "-<letter><fixed text>".  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options: same matching, but the value stored is a pointer
	 to the table entry's feature set.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
32618
/* Print the ARM-specific command-line usage message to FP, covering the
   short option table, the long option table and the conditionally-compiled
   endianness/v4bx/FDPIC options.  */
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));

#ifdef OBJ_ELF
  fprintf (fp, _("\
  --fdpic                 generate an FDPIC object file\n"));
#endif /* OBJ_ELF */
}
32653
32654 #ifdef OBJ_ELF
32655
typedef struct
{
  /* Tag_CPU_arch build attribute value (a TAG_CPU_ARCH_* constant), or -1
     for the table terminator.  */
  int val;
  /* Architecture feature set that maps to VAL.  */
  arm_feature_set flags;
} cpu_arch_ver_table;
32661
32662 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
32663 chronologically for architectures, with an exception for ARMv6-M and
32664 ARMv6S-M due to legacy reasons. No new architecture should have a
32665 special case. This allows for build attribute selection results to be
32666 stable when new architectures are added. */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V1},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2S},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3M},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4xM},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4TxM},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4T},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5xM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5TxM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5T},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TExP},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TE},
    {TAG_CPU_ARCH_V5TEJ,      ARM_ARCH_V5TEJ},
    {TAG_CPU_ARCH_V6,	      ARM_ARCH_V6},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6Z},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6KZ},
    {TAG_CPU_ARCH_V6K,	      ARM_ARCH_V6K},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6T2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6ZT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KZT2},

    /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
       always selected build attributes to match those of ARMv6-M
       (resp. ARMv6S-M).  However, due to these architectures being a strict
       subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
       would be selected when fully respecting chronology of architectures.
       It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
       move them before ARMv7 architectures.  */
    {TAG_CPU_ARCH_V6_M,	      ARM_ARCH_V6M},
    {TAG_CPU_ARCH_V6S_M,      ARM_ARCH_V6SM},

    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7A},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7R},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7M},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7VE},
    {TAG_CPU_ARCH_V7E_M,      ARM_ARCH_V7EM},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_1A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_2A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_3A},
    {TAG_CPU_ARCH_V8M_BASE,   ARM_ARCH_V8M_BASE},
    {TAG_CPU_ARCH_V8M_MAIN,   ARM_ARCH_V8M_MAIN},
    {TAG_CPU_ARCH_V8R,	      ARM_ARCH_V8R},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_4A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_5A},
    {TAG_CPU_ARCH_V8_1M_MAIN, ARM_ARCH_V8_1M_MAIN},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_6A},
    /* Terminator: get_aeabi_cpu_arch_from_fset stops at val == -1.  */
    {-1,		      ARM_ARCH_NONE}
};
32723
32724 /* Set an attribute if it has not already been set by the user. */
32725
32726 static void
32727 aeabi_set_attribute_int (int tag, int value)
32728 {
32729 if (tag < 1
32730 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
32731 || !attributes_set_explicitly[tag])
32732 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
32733 }
32734
32735 static void
32736 aeabi_set_attribute_string (int tag, const char *value)
32737 {
32738 if (tag < 1
32739 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
32740 || !attributes_set_explicitly[tag])
32741 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
32742 }
32743
32744 /* Return whether features in the *NEEDED feature set are available via
32745 extensions for the architecture whose feature set is *ARCH_FSET. */
32746
static bfd_boolean
have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
			    const arm_feature_set *needed)
{
  int i, nb_allowed_archs;
  arm_feature_set ext_fset;
  const struct arm_option_extension_value_table *opt;

  /* Accumulate, over the whole legacy extension table, every feature that
     some applicable extension could add on top of *ARCH_FSET.  */
  ext_fset = arm_arch_none;
  for (opt = arm_extensions; opt->name != NULL; opt++)
    {
      /* Extension does not provide any feature we need.  */
      if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
	continue;

      nb_allowed_archs =
	sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
      for (i = 0; i < nb_allowed_archs; i++)
	{
	  /* Empty entry.  */
	  if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
	    break;

	  /* Extension is available, add it.  */
	  if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
	    ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
	}
    }

  /* Can we enable all features in *needed?  */
  return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
}
32779
32780 /* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
32781 a given architecture feature set *ARCH_EXT_FSET including extension feature
32782 set *EXT_FSET. Selection logic used depend on EXACT_MATCH:
32783 - if true, check for an exact match of the architecture modulo extensions;
32784 - otherwise, select build attribute value of the first superset
32785 architecture released so that results remains stable when new architectures
32786 are added.
32787 For -march/-mcpu=all the build attribute value of the most featureful
32788 architecture is returned. Tag_CPU_arch_profile result is returned in
32789 PROFILE. */
32790
static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
			      const arm_feature_set *ext_fset,
			      char *profile, int exact_match)
{
  arm_feature_set arch_fset;
  const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;

  /* Select most featureful architecture with all its extensions if building
     for -march=all as the feature sets used to set build attributes.  */
  if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
    {
      /* Force revisiting of decision for each new architecture.  */
      gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8_1M_MAIN);
      *profile = 'A';
      return TAG_CPU_ARCH_V8;
    }

  /* ARCH_FSET is the architecture feature set with the user-enabled
     extensions stripped out.  */
  ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);

  for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
    {
      arm_feature_set known_arch_fset;

      /* Compare core features only; FPU bits are irrelevant here.  */
      ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
      if (exact_match)
	{
	  /* Base architecture match user-specified architecture and
	     extensions, eg. ARMv6S-M matching -march=armv6-m+os.  */
	  if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
	    {
	      p_ver_ret = p_ver;
	      goto found;
	    }
	  /* Base architecture match user-specified architecture only
	     (eg. ARMv6-M in the same case as above).  Record it in case we
	     find a match with above condition.  */
	  else if (p_ver_ret == NULL
		   && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
	    p_ver_ret = p_ver;
	}
      else
	{

	  /* Architecture has all features wanted.  */
	  if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
	    {
	      arm_feature_set added_fset;

	      /* Compute features added by this architecture over the one
		 recorded in p_ver_ret.  */
	      if (p_ver_ret != NULL)
		ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
				   p_ver_ret->flags);
	      /* First architecture that match incl. with extensions, or the
		 only difference in features over the recorded match is
		 features that were optional and are now mandatory.  */
	      if (p_ver_ret == NULL
		  || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
		{
		  p_ver_ret = p_ver;
		  goto found;
		}
	    }
	  else if (p_ver_ret == NULL)
	    {
	      arm_feature_set needed_ext_fset;

	      ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);

	      /* Architecture has all features needed when using some
		 extensions.  Record it and continue searching in case there
		 exist an architecture providing all needed features without
		 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
		 OS extension).  */
	      if (have_ext_for_needed_feat_p (&known_arch_fset,
					      &needed_ext_fset))
		p_ver_ret = p_ver;
	    }
	}
    }

  if (p_ver_ret == NULL)
    return -1;

found:
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only)))
    *profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r))
    *profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
    *profile = 'M';
  else
    *profile = '\0';
  return p_ver_ret->val;
}
32891
/* Set the public EABI object attributes.

   Computes the effective feature set -- either autodetected from the
   instructions actually assembled, or derived from the user-selected
   CPU/architecture/FPU -- and records the corresponding build attributes
   (Tag_CPU_arch, Tag_VFP_arch, Tag_DIV_use, ...) for the .ARM.attributes
   section.  Called from arm_md_end and again from arm_md_post_relax, so it
   must be safe to run more than once.  */

static void
aeabi_set_public_attributes (void)
{
  char profile = '\0';
  int arch = -1;
  int virt_sec = 0;
  int fp16_optional = 0;
  int skip_exact_match = 0;
  arm_feature_set flags, flags_arch, flags_ext;

  /* Autodetection mode, choose the architecture based the instructions
     actually used.  */
  if (no_cpu_selected ())
    {
      ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);

      /* Any ARM-state instruction implies at least ARMv1.  */
      if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

      /* Any Thumb-state instruction implies at least ARMv4T.  */
      if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

      /* Code run during relaxation relies on selected_cpu being set.  */
      ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
      flags_ext = arm_arch_none;
      ARM_CLEAR_FEATURE (selected_arch, flags_arch, flags_ext);
      selected_ext = flags_ext;
      selected_cpu = flags;
    }
  /* Otherwise, choose the architecture based on the capabilities of the
     requested cpu.  */
  else
    {
      ARM_MERGE_FEATURE_SETS (flags_arch, selected_arch, selected_ext);
      ARM_CLEAR_FEATURE (flags_arch, flags_arch, fpu_any);
      flags_ext = selected_ext;
      flags = selected_cpu;
    }
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_fpu);

  /* Allow the user to override the reported architecture.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch))
    {
      ARM_CLEAR_FEATURE (flags_arch, selected_object_arch, fpu_any);
      flags_ext = arm_arch_none;
    }
  else
    skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);

  /* When this function is run again after relaxation has happened there is no
     way to determine whether an architecture or CPU was specified by the user:
     - selected_cpu is set above for relaxation to work;
     - march_cpu_opt is not set if only -mcpu or .cpu is used;
     - mcpu_cpu_opt is set to arm_arch_any for autodetection.
     Therefore, if not in -march=all case we first try an exact match and fall
     back to autodetection.  */
  if (!skip_exact_match)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
  if (arch == -1)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
  if (arch == -1)
    as_bad (_("no architecture contains all the instructions used\n"));

  /* Tag_CPU_name.  Note this upper-cases "armvXX" style names in place in
     selected_cpu_name before emitting them.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  PROFILE was filled in ('A'/'R'/'M' or NUL) by
     get_aeabi_cpu_arch_from_fset above.  */
  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (ARM_CPU_HAS_FEATURE (selected_ext, arm_ext_dsp))
    aeabi_set_attribute_int (Tag_DSP_extension, 1);

  ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || ARM_FEATURE_ZERO (flags_arch))
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || ARM_FEATURE_ZERO (flags_arch))
    {
      int thumb_isa_use;

      /* v8-M only (no v8-A) gets the dedicated value 3; otherwise Thumb-2
	 capable targets report 2 and original Thumb reports 1.  */
      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  The ladder is ordered from most to least capable FPU, so
     the first matching feature decides the value.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  Single-precision-only VFP (v1xd without v1).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  /* Only plain (non-FMA) NEON leaves FP16 as an optional extra.  */
	  fp16_optional = 1;
	}
    }

  /* Tag_MVE_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, mve_fp_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, mve_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 1);

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  Bit 0: security extensions, bit 1:
     virtualization extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);

  /* Tag_ABI_FP_16bit_format, only when explicitly selected.  */
  if (fp16_format != ARM_FP16_FORMAT_DEFAULT)
    aeabi_set_attribute_int (Tag_ABI_FP_16bit_format, fp16_format);
}
33105
33106 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
33107 finished and free extension feature bits which will not be used anymore. */
33108
33109 void
33110 arm_md_post_relax (void)
33111 {
33112 aeabi_set_public_attributes ();
33113 XDELETE (mcpu_ext_opt);
33114 mcpu_ext_opt = NULL;
33115 XDELETE (march_ext_opt);
33116 march_ext_opt = NULL;
33117 }
33118
33119 /* Add the default contents for the .ARM.attributes section. */
33120
33121 void
33122 arm_md_end (void)
33123 {
33124 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
33125 return;
33126
33127 aeabi_set_public_attributes ();
33128 }
33129 #endif /* OBJ_ELF */
33130
33131 /* Parse a .cpu directive. */
33132
33133 static void
33134 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
33135 {
33136 const struct arm_cpu_option_table *opt;
33137 char *name;
33138 char saved_char;
33139
33140 name = input_line_pointer;
33141 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
33142 input_line_pointer++;
33143 saved_char = *input_line_pointer;
33144 *input_line_pointer = 0;
33145
33146 /* Skip the first "all" entry. */
33147 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
33148 if (streq (opt->name, name))
33149 {
33150 selected_arch = opt->value;
33151 selected_ext = opt->ext;
33152 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
33153 if (opt->canonical_name)
33154 strcpy (selected_cpu_name, opt->canonical_name);
33155 else
33156 {
33157 int i;
33158 for (i = 0; opt->name[i]; i++)
33159 selected_cpu_name[i] = TOUPPER (opt->name[i]);
33160
33161 selected_cpu_name[i] = 0;
33162 }
33163 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
33164
33165 *input_line_pointer = saved_char;
33166 demand_empty_rest_of_line ();
33167 return;
33168 }
33169 as_bad (_("unknown cpu `%s'"), name);
33170 *input_line_pointer = saved_char;
33171 ignore_rest_of_line ();
33172 }
33173
33174 /* Parse a .arch directive. */
33175
33176 static void
33177 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
33178 {
33179 const struct arm_arch_option_table *opt;
33180 char saved_char;
33181 char *name;
33182
33183 name = input_line_pointer;
33184 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
33185 input_line_pointer++;
33186 saved_char = *input_line_pointer;
33187 *input_line_pointer = 0;
33188
33189 /* Skip the first "all" entry. */
33190 for (opt = arm_archs + 1; opt->name != NULL; opt++)
33191 if (streq (opt->name, name))
33192 {
33193 selected_arch = opt->value;
33194 selected_ctx_ext_table = opt->ext_table;
33195 selected_ext = arm_arch_none;
33196 selected_cpu = selected_arch;
33197 strcpy (selected_cpu_name, opt->name);
33198 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
33199 *input_line_pointer = saved_char;
33200 demand_empty_rest_of_line ();
33201 return;
33202 }
33203
33204 as_bad (_("unknown architecture `%s'\n"), name);
33205 *input_line_pointer = saved_char;
33206 ignore_rest_of_line ();
33207 }
33208
33209 /* Parse a .object_arch directive. */
33210
33211 static void
33212 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
33213 {
33214 const struct arm_arch_option_table *opt;
33215 char saved_char;
33216 char *name;
33217
33218 name = input_line_pointer;
33219 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
33220 input_line_pointer++;
33221 saved_char = *input_line_pointer;
33222 *input_line_pointer = 0;
33223
33224 /* Skip the first "all" entry. */
33225 for (opt = arm_archs + 1; opt->name != NULL; opt++)
33226 if (streq (opt->name, name))
33227 {
33228 selected_object_arch = opt->value;
33229 *input_line_pointer = saved_char;
33230 demand_empty_rest_of_line ();
33231 return;
33232 }
33233
33234 as_bad (_("unknown architecture `%s'\n"), name);
33235 *input_line_pointer = saved_char;
33236 ignore_rest_of_line ();
33237 }
33238
33239 /* Parse a .arch_extension directive. */
33240
33241 static void
33242 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
33243 {
33244 const struct arm_option_extension_value_table *opt;
33245 char saved_char;
33246 char *name;
33247 int adding_value = 1;
33248
33249 name = input_line_pointer;
33250 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
33251 input_line_pointer++;
33252 saved_char = *input_line_pointer;
33253 *input_line_pointer = 0;
33254
33255 if (strlen (name) >= 2
33256 && strncmp (name, "no", 2) == 0)
33257 {
33258 adding_value = 0;
33259 name += 2;
33260 }
33261
33262 /* Check the context specific extension table */
33263 if (selected_ctx_ext_table)
33264 {
33265 const struct arm_ext_table * ext_opt;
33266 for (ext_opt = selected_ctx_ext_table; ext_opt->name != NULL; ext_opt++)
33267 {
33268 if (streq (ext_opt->name, name))
33269 {
33270 if (adding_value)
33271 {
33272 if (ARM_FEATURE_ZERO (ext_opt->merge))
33273 /* TODO: Option not supported. When we remove the
33274 legacy table this case should error out. */
33275 continue;
33276 ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
33277 ext_opt->merge);
33278 }
33279 else
33280 ARM_CLEAR_FEATURE (selected_ext, selected_ext, ext_opt->clear);
33281
33282 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
33283 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
33284 *input_line_pointer = saved_char;
33285 demand_empty_rest_of_line ();
33286 return;
33287 }
33288 }
33289 }
33290
33291 for (opt = arm_extensions; opt->name != NULL; opt++)
33292 if (streq (opt->name, name))
33293 {
33294 int i, nb_allowed_archs =
33295 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
33296 for (i = 0; i < nb_allowed_archs; i++)
33297 {
33298 /* Empty entry. */
33299 if (ARM_CPU_IS_ANY (opt->allowed_archs[i]))
33300 continue;
33301 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], selected_arch))
33302 break;
33303 }
33304
33305 if (i == nb_allowed_archs)
33306 {
33307 as_bad (_("architectural extension `%s' is not allowed for the "
33308 "current base architecture"), name);
33309 break;
33310 }
33311
33312 if (adding_value)
33313 ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
33314 opt->merge_value);
33315 else
33316 ARM_CLEAR_FEATURE (selected_ext, selected_ext, opt->clear_value);
33317
33318 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
33319 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
33320 *input_line_pointer = saved_char;
33321 demand_empty_rest_of_line ();
33322 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
33323 on this return so that duplicate extensions (extensions with the
33324 same name as a previous extension in the list) are not considered
33325 for command-line parsing. */
33326 return;
33327 }
33328
33329 if (opt->name == NULL)
33330 as_bad (_("unknown architecture extension `%s'\n"), name);
33331
33332 *input_line_pointer = saved_char;
33333 ignore_rest_of_line ();
33334 }
33335
33336 /* Parse a .fpu directive. */
33337
33338 static void
33339 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
33340 {
33341 const struct arm_option_fpu_value_table *opt;
33342 char saved_char;
33343 char *name;
33344
33345 name = input_line_pointer;
33346 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
33347 input_line_pointer++;
33348 saved_char = *input_line_pointer;
33349 *input_line_pointer = 0;
33350
33351 for (opt = arm_fpus; opt->name != NULL; opt++)
33352 if (streq (opt->name, name))
33353 {
33354 selected_fpu = opt->value;
33355 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, fpu_any);
33356 #ifndef CPU_DEFAULT
33357 if (no_cpu_selected ())
33358 ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
33359 else
33360 #endif
33361 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
33362 *input_line_pointer = saved_char;
33363 demand_empty_rest_of_line ();
33364 return;
33365 }
33366
33367 as_bad (_("unknown floating point format `%s'\n"), name);
33368 *input_line_pointer = saved_char;
33369 ignore_rest_of_line ();
33370 }
33371
/* Copy symbol information.  Propagates the ARM-specific symbol flag word
   (as accessed via ARM_GET_FLAG, eg. the Thumb-function marking) from SRC
   to DEST.  Called by the generic gas code when symbol attributes need to
   be duplicated.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
33379
33380 #ifdef OBJ_ELF
33381 /* Given a symbolic attribute NAME, return the proper integer value.
33382 Returns -1 if the attribute is not known. */
33383
33384 int
33385 arm_convert_symbolic_attribute (const char *name)
33386 {
33387 static const struct
33388 {
33389 const char * name;
33390 const int tag;
33391 }
33392 attribute_table[] =
33393 {
33394 /* When you modify this table you should
33395 also modify the list in doc/c-arm.texi. */
33396 #define T(tag) {#tag, tag}
33397 T (Tag_CPU_raw_name),
33398 T (Tag_CPU_name),
33399 T (Tag_CPU_arch),
33400 T (Tag_CPU_arch_profile),
33401 T (Tag_ARM_ISA_use),
33402 T (Tag_THUMB_ISA_use),
33403 T (Tag_FP_arch),
33404 T (Tag_VFP_arch),
33405 T (Tag_WMMX_arch),
33406 T (Tag_Advanced_SIMD_arch),
33407 T (Tag_PCS_config),
33408 T (Tag_ABI_PCS_R9_use),
33409 T (Tag_ABI_PCS_RW_data),
33410 T (Tag_ABI_PCS_RO_data),
33411 T (Tag_ABI_PCS_GOT_use),
33412 T (Tag_ABI_PCS_wchar_t),
33413 T (Tag_ABI_FP_rounding),
33414 T (Tag_ABI_FP_denormal),
33415 T (Tag_ABI_FP_exceptions),
33416 T (Tag_ABI_FP_user_exceptions),
33417 T (Tag_ABI_FP_number_model),
33418 T (Tag_ABI_align_needed),
33419 T (Tag_ABI_align8_needed),
33420 T (Tag_ABI_align_preserved),
33421 T (Tag_ABI_align8_preserved),
33422 T (Tag_ABI_enum_size),
33423 T (Tag_ABI_HardFP_use),
33424 T (Tag_ABI_VFP_args),
33425 T (Tag_ABI_WMMX_args),
33426 T (Tag_ABI_optimization_goals),
33427 T (Tag_ABI_FP_optimization_goals),
33428 T (Tag_compatibility),
33429 T (Tag_CPU_unaligned_access),
33430 T (Tag_FP_HP_extension),
33431 T (Tag_VFP_HP_extension),
33432 T (Tag_ABI_FP_16bit_format),
33433 T (Tag_MPextension_use),
33434 T (Tag_DIV_use),
33435 T (Tag_nodefaults),
33436 T (Tag_also_compatible_with),
33437 T (Tag_conformance),
33438 T (Tag_T2EE_use),
33439 T (Tag_Virtualization_use),
33440 T (Tag_DSP_extension),
33441 T (Tag_MVE_arch),
33442 /* We deliberately do not include Tag_MPextension_use_legacy. */
33443 #undef T
33444 };
33445 unsigned int i;
33446
33447 if (name == NULL)
33448 return -1;
33449
33450 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
33451 if (streq (name, attribute_table[i].name))
33452 return attribute_table[i].tag;
33453
33454 return -1;
33455 }
33456
33457 /* Apply sym value for relocations only in the case that they are for
33458 local symbols in the same segment as the fixup and you have the
33459 respective architectural feature for blx and simple switches. */
33460
33461 int
33462 arm_apply_sym_value (struct fix * fixP, segT this_seg)
33463 {
33464 if (fixP->fx_addsy
33465 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
33466 /* PR 17444: If the local symbol is in a different section then a reloc
33467 will always be generated for it, so applying the symbol value now
33468 will result in a double offset being stored in the relocation. */
33469 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
33470 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
33471 {
33472 switch (fixP->fx_r_type)
33473 {
33474 case BFD_RELOC_ARM_PCREL_BLX:
33475 case BFD_RELOC_THUMB_PCREL_BRANCH23:
33476 if (ARM_IS_FUNC (fixP->fx_addsy))
33477 return 1;
33478 break;
33479
33480 case BFD_RELOC_ARM_PCREL_CALL:
33481 case BFD_RELOC_THUMB_PCREL_BLX:
33482 if (THUMB_IS_FUNC (fixP->fx_addsy))
33483 return 1;
33484 break;
33485
33486 default:
33487 break;
33488 }
33489
33490 }
33491 return 0;
33492 }
33493 #endif /* OBJ_ELF */
This page took 1.221041 seconds and 4 git commands to generate.