[binutils][arm] BFloat16 enablement [4/X]
[deliverable/binutils-gdb.git] / gas / config / tc-arm.c
... / ...
CommitLineData
1/* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2019 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26#include "as.h"
27#include <limits.h>
28#include <stdarg.h>
29#define NO_RELOC 0
30#include "safe-ctype.h"
31#include "subsegs.h"
32#include "obstack.h"
33#include "libiberty.h"
34#include "opcode/arm.h"
35#include "cpu-arm.h"
36
37#ifdef OBJ_ELF
38#include "elf/arm.h"
39#include "dw2gencfi.h"
40#endif
41
42#include "dwarf2dbg.h"
43
44#ifdef OBJ_ELF
45/* Must be at least the size of the largest unwind opcode (currently two). */
46#define ARM_OPCODE_CHUNK_SIZE 8
47
/* This structure holds the unwinding state.  */

static struct
{
  /* Symbol marking the start of the function being described (see the
     .fnstart unwinding directive).  */
  symbolS * proc_start;
  /* Symbol for this function's entry in the exception table.  */
  symbolS * table_entry;
  /* Custom personality routine symbol, if one was named.  NOTE(review):
     presumably NULL when personality_index selects a pre-defined routine
     instead -- confirm against the .personality/.personalityindex
     handlers.  */
  symbolS * personality_routine;
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* Bytes used and bytes allocated in OPCODES.  */
  int opcode_count;
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;
78
79/* Whether --fdpic was given. */
80static int arm_fdpic;
81
82#endif /* OBJ_ELF */
83
/* Results from operand parsing worker functions.  */

typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  /* Parsing failed, and the caller should not backtrack to try an
     alternative interpretation of the operand.  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
92
/* The floating-point ABI in use (see mfloat_abi_opt below): hardware FP
   registers for arguments, soft-float calling convention with hardware
   instructions, or full software floating point.  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
99
100/* Types of processor to assemble for. */
101#ifndef CPU_DEFAULT
102/* The code that was here used to select a default CPU depending on compiler
103 pre-defines which were only present when doing native builds, thus
104 changing gas' default behaviour depending upon the build host.
105
106 If you have a target that requires a default CPU option then the you
107 should define CPU_DEFAULT here. */
108#endif
109
110/* Perform range checks on positive and negative overflows by checking if the
111 VALUE given fits within the range of an BITS sized immediate. */
112static bfd_boolean out_of_range_p (offsetT value, offsetT bits)
113 {
114 gas_assert (bits < (offsetT)(sizeof (value) * 8));
115 return (value & ~((1 << bits)-1))
116 && ((value & ~((1 << bits)-1)) != ~((1 << bits)-1));
117}
118
119#ifndef FPU_DEFAULT
120# ifdef TE_LINUX
121# define FPU_DEFAULT FPU_ARCH_FPA
122# elif defined (TE_NetBSD)
123# ifdef OBJ_ELF
124# define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
125# else
126 /* Legacy a.out format. */
127# define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
128# endif
129# elif defined (TE_VXWORKS)
130# define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
131# else
132 /* For backwards compatibility, default to FPA. */
133# define FPU_DEFAULT FPU_ARCH_FPA
134# endif
135#endif /* ifndef FPU_DEFAULT */
136
137#define streq(a, b) (strcmp (a, b) == 0)
138
139/* Current set of feature bits available (CPU+FPU). Different from
140 selected_cpu + selected_fpu in case of autodetection since the CPU
141 feature bits are then all set. */
142static arm_feature_set cpu_variant;
143/* Feature bits used in each execution state. Used to set build attribute
144 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
145static arm_feature_set arm_arch_used;
146static arm_feature_set thumb_arch_used;
147
148/* Flags stored in private area of BFD structure. */
149static int uses_apcs_26 = FALSE;
150static int atpcs = FALSE;
151static int support_interwork = FALSE;
152static int uses_apcs_float = FALSE;
153static int pic_code = FALSE;
154static int fix_v4bx = FALSE;
155/* Warn on using deprecated features. */
156static int warn_on_deprecated = TRUE;
157
158/* Understand CodeComposer Studio assembly syntax. */
159bfd_boolean codecomposer_syntax = FALSE;
160
161/* Variables that we set while parsing command-line options. Once all
162 options have been read we re-process these values to set the real
163 assembly flags. */
164
165/* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
166 instead of -mcpu=arm1). */
167static const arm_feature_set *legacy_cpu = NULL;
168static const arm_feature_set *legacy_fpu = NULL;
169
170/* CPU, extension and FPU feature bits selected by -mcpu. */
171static const arm_feature_set *mcpu_cpu_opt = NULL;
172static arm_feature_set *mcpu_ext_opt = NULL;
173static const arm_feature_set *mcpu_fpu_opt = NULL;
174
175/* CPU, extension and FPU feature bits selected by -march. */
176static const arm_feature_set *march_cpu_opt = NULL;
177static arm_feature_set *march_ext_opt = NULL;
178static const arm_feature_set *march_fpu_opt = NULL;
179
180/* Feature bits selected by -mfpu. */
181static const arm_feature_set *mfpu_opt = NULL;
182
183/* Constants for known architecture features. */
184static const arm_feature_set fpu_default = FPU_DEFAULT;
185static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
186static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
187static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
188static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
189static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
190static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
191#ifdef OBJ_ELF
192static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
193#endif
194static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
195
196#ifdef CPU_DEFAULT
197static const arm_feature_set cpu_default = CPU_DEFAULT;
198#endif
199
200static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
201static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
202static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
203static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
204static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
205static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
206static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
207static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
208static const arm_feature_set arm_ext_v4t_5 =
209 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
210static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
211static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
212static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
213static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
214static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
215static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
216static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
 217/* Only for compatibility of hint instructions. */
218static const arm_feature_set arm_ext_v6k_v6t2 =
219 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V6T2);
220static const arm_feature_set arm_ext_v6_notm =
221 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
222static const arm_feature_set arm_ext_v6_dsp =
223 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
224static const arm_feature_set arm_ext_barrier =
225 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
226static const arm_feature_set arm_ext_msr =
227 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
228static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
229static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
230static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
231static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
232#ifdef OBJ_ELF
233static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
234#endif
235static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
236static const arm_feature_set arm_ext_m =
237 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
238 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
239static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
240static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
241static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
242static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
243static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
244static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
245static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
246static const arm_feature_set arm_ext_v8m_main =
247 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
248static const arm_feature_set arm_ext_v8_1m_main =
249ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
250/* Instructions in ARMv8-M only found in M profile architectures. */
251static const arm_feature_set arm_ext_v8m_m_only =
252 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
253static const arm_feature_set arm_ext_v6t2_v8m =
254 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
255/* Instructions shared between ARMv8-A and ARMv8-M. */
256static const arm_feature_set arm_ext_atomics =
257 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
258#ifdef OBJ_ELF
259/* DSP instructions Tag_DSP_extension refers to. */
260static const arm_feature_set arm_ext_dsp =
261 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
262#endif
263static const arm_feature_set arm_ext_ras =
264 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
265/* FP16 instructions. */
266static const arm_feature_set arm_ext_fp16 =
267 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
268static const arm_feature_set arm_ext_fp16_fml =
269 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
270static const arm_feature_set arm_ext_v8_2 =
271 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
272static const arm_feature_set arm_ext_v8_3 =
273 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
274static const arm_feature_set arm_ext_sb =
275 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
276static const arm_feature_set arm_ext_predres =
277 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);
278static const arm_feature_set arm_ext_bf16 =
279 ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16);
280
281static const arm_feature_set arm_arch_any = ARM_ANY;
282#ifdef OBJ_ELF
283static const arm_feature_set fpu_any = FPU_ANY;
284#endif
285static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
286static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
287static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
288
289static const arm_feature_set arm_cext_iwmmxt2 =
290 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
291static const arm_feature_set arm_cext_iwmmxt =
292 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
293static const arm_feature_set arm_cext_xscale =
294 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
295static const arm_feature_set arm_cext_maverick =
296 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
297static const arm_feature_set fpu_fpa_ext_v1 =
298 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
299static const arm_feature_set fpu_fpa_ext_v2 =
300 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
301static const arm_feature_set fpu_vfp_ext_v1xd =
302 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
303static const arm_feature_set fpu_vfp_ext_v1 =
304 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
305static const arm_feature_set fpu_vfp_ext_v2 =
306 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
307static const arm_feature_set fpu_vfp_ext_v3xd =
308 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
309static const arm_feature_set fpu_vfp_ext_v3 =
310 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
311static const arm_feature_set fpu_vfp_ext_d32 =
312 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
313static const arm_feature_set fpu_neon_ext_v1 =
314 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
315static const arm_feature_set fpu_vfp_v3_or_neon_ext =
316 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
317static const arm_feature_set mve_ext =
318 ARM_FEATURE_COPROC (FPU_MVE);
319static const arm_feature_set mve_fp_ext =
320 ARM_FEATURE_COPROC (FPU_MVE_FP);
321#ifdef OBJ_ELF
322static const arm_feature_set fpu_vfp_fp16 =
323 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
324static const arm_feature_set fpu_neon_ext_fma =
325 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
326#endif
327static const arm_feature_set fpu_vfp_ext_fma =
328 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
329static const arm_feature_set fpu_vfp_ext_armv8 =
330 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
331static const arm_feature_set fpu_vfp_ext_armv8xd =
332 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
333static const arm_feature_set fpu_neon_ext_armv8 =
334 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
335static const arm_feature_set fpu_crypto_ext_armv8 =
336 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
337static const arm_feature_set crc_ext_armv8 =
338 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
339static const arm_feature_set fpu_neon_ext_v8_1 =
340 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
341static const arm_feature_set fpu_neon_ext_dotprod =
342 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
343
344static int mfloat_abi_opt = -1;
345/* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
346 directive. */
347static arm_feature_set selected_arch = ARM_ARCH_NONE;
348/* Extension feature bits selected by the last -mcpu/-march or .arch_extension
349 directive. */
350static arm_feature_set selected_ext = ARM_ARCH_NONE;
 351/* Feature bits selected by the last -mcpu/-march or by the combination of the
 352 last .cpu/.arch directive and the .arch_extension directives seen since
 353 that directive. */
354static arm_feature_set selected_cpu = ARM_ARCH_NONE;
355/* FPU feature bits selected by the last -mfpu or .fpu directive. */
356static arm_feature_set selected_fpu = FPU_NONE;
357/* Feature bits selected by the last .object_arch directive. */
358static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
359/* Must be long enough to hold any of the names in arm_cpus. */
360static const struct arm_ext_table * selected_ctx_ext_table = NULL;
361static char selected_cpu_name[20];
362
363extern FLONUM_TYPE generic_floating_point_number;
364
365/* Return if no cpu was selected on command-line. */
366static bfd_boolean
367no_cpu_selected (void)
368{
369 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
370}
371
372#ifdef OBJ_ELF
373# ifdef EABI_DEFAULT
374static int meabi_flags = EABI_DEFAULT;
375# else
376static int meabi_flags = EF_ARM_EABI_UNKNOWN;
377# endif
378
379static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
380
381bfd_boolean
382arm_is_eabi (void)
383{
384 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
385}
386#endif
387
388#ifdef OBJ_ELF
389/* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
390symbolS * GOT_symbol;
391#endif
392
393/* 0: assemble for ARM,
394 1: assemble for Thumb,
395 2: assemble for Thumb even though target CPU does not support thumb
396 instructions. */
397static int thumb_mode = 0;
398/* A value distinct from the possible values for thumb_mode that we
399 can use to record whether thumb_mode has been copied into the
400 tc_frag_data field of a frag. */
401#define MODE_RECORDED (1 << 4)
402
/* Specifies the intrinsic IT insn behavior mode.  The ARM/THUMB values
   select the execution state(s) in which the implicit-IT behaviour is
   enabled; ALWAYS is their union.  */
enum implicit_it_mode
{
  IMPLICIT_IT_MODE_NEVER = 0x00,
  IMPLICIT_IT_MODE_ARM = 0x01,
  IMPLICIT_IT_MODE_THUMB = 0x02,
  IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
};
/* Currently selected mode.  NOTE(review): presumably settable from the
   command line -- confirm at the option-parsing code.  */
static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
412
413/* If unified_syntax is true, we are processing the new unified
414 ARM/Thumb syntax. Important differences from the old ARM mode:
415
416 - Immediate operands do not require a # prefix.
417 - Conditional affixes always appear at the end of the
418 instruction. (For backward compatibility, those instructions
419 that formerly had them in the middle, continue to accept them
420 there.)
421 - The IT instruction may appear, and if it does is validated
422 against subsequent conditional affixes. It does not generate
423 machine code.
424
425 Important differences from the old Thumb mode:
426
427 - Immediate operands do not require a # prefix.
428 - Most of the V6T2 instructions are only available in unified mode.
429 - The .N and .W suffixes are recognized and honored (it is an error
430 if they cannot be honored).
431 - All instructions set the flags if and only if they have an 's' affix.
432 - Conditional affixes may be used. They are validated against
433 preceding IT instructions. Unlike ARM mode, you cannot use a
434 conditional affix except in the scope of an IT instruction. */
435
436static bfd_boolean unified_syntax = FALSE;
437
438/* An immediate operand can start with #, and ld*, st*, pld operands
439 can contain [ and ]. We need to tell APP not to elide whitespace
440 before a [, which can appear as the first operand for pld.
441 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
442const char arm_symbol_chars[] = "#[]{}";
443
/* Classification of an element type parsed from a Neon/MVE style mnemonic
   type suffix.  */
enum neon_el_type
{
  NT_invtype,	/* No valid type parsed.  */
  NT_untyped,
  NT_integer,
  NT_float,
  NT_poly,	/* Polynomial type.  */
  NT_signed,
  NT_bfloat,	/* BFloat16 (added by the BF16 enablement series).  */
  NT_unsigned
};
455
/* One parsed type element: a classification plus a bit width.  */
struct neon_type_el
{
  enum neon_el_type type;
  unsigned size;
};

/* Maximum number of type elements that one mnemonic suffix may carry.  */
#define NEON_MAX_TYPE_ELS 4

/* The complete parsed type suffix of an instruction.  */
struct neon_type
{
  struct neon_type_el el[NEON_MAX_TYPE_ELS];
  /* Number of valid entries in EL.  */
  unsigned elems;
};
469
/* Classification of an instruction with respect to IT (Thumb) and VPT
   (MVE) predication blocks.  */
enum pred_instruction_type
{
  OUTSIDE_PRED_INSN,		/* Not inside an IT or VPT block.  */
  INSIDE_VPT_INSN,		/* Inside a VPT block.  */
  INSIDE_IT_INSN,		/* Inside an IT block.  */
  INSIDE_IT_LAST_INSN,		/* Inside an IT block; must be its last insn.  */
  IF_INSIDE_IT_LAST_INSN,	/* Either outside or inside;
				   if inside, should be the last one.  */
  NEUTRAL_IT_INSN,		/* This could be either inside or outside,
				   i.e. BKPT and NOP.  */
  IT_INSN,			/* The IT insn has been parsed.  */
  VPT_INSN,			/* The VPT/VPST insn has been parsed.  */
  MVE_OUTSIDE_PRED_INSN ,	/* Instruction to indicate a MVE instruction without
				   a predication code.  */
  MVE_UNPREDICABLE_INSN		/* MVE instruction that is non-predicable.  */
};
486
/* The maximum number of operands we need.  */
#define ARM_IT_MAX_OPERANDS 6
#define ARM_IT_MAX_RELOCS 3

/* Parsed state of the instruction currently being assembled.  A single
   global instance (INST, below) is shared by the parsing and encoding
   routines.  */
struct arm_it
{
  /* Diagnostic to report on failure, or NULL if assembly has succeeded so
     far.  */
  const char * error;
  /* The binary encoding accumulated so far.  */
  unsigned long instruction;
  /* Size of the encoding in bytes.  */
  int size;
  /* Explicitly requested encoding size.  NOTE(review): presumably set by
     the unified-syntax .n/.w width suffixes -- confirm at the parser.  */
  int size_req;
  /* Condition code of the instruction.  */
  int cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int uncond_value;
  /* Type suffix(es) parsed from the mnemonic.  */
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  int is_neon;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long relax;
  /* Relocations attached to the instruction's operands.  */
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS exp;
    int pc_rel;
  } relocs[ARM_IT_MAX_RELOCS];

  /* Position of this instruction relative to IT/VPT blocks.  */
  enum pred_instruction_type pred_insn_type;

  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present : 1;     /* Operand present.  */
    unsigned isreg : 1;	      /* Operand was a register.  */
    unsigned immisreg : 2;    /* .imm field is a second register.
				 0: imm, 1: gpr, 2: MVE Q-register.  */
    unsigned isscalar : 2;    /* Operand is a (SIMD) scalar:
				 0) not scalar,
				 1) Neon scalar,
				 2) MVE scalar.  */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm : 1;    /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec : 1;       /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad : 1;      /* Operand is SIMD quad register.  */
    unsigned issingle : 1;    /* Operand is VFP single-precision register.  */
    unsigned iszr : 1;	      /* Operand is ZR register.  */
    unsigned hasreloc : 1;    /* Operand has relocation suffix.  */
    unsigned writeback : 1;   /* Operand has trailing !  */
    unsigned preind : 1;      /* Preindexed address.  */
    unsigned postind : 1;     /* Postindexed address.  */
    unsigned negative : 1;    /* Index register was negated.  */
    unsigned shifted : 1;     /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
  } operands[ARM_IT_MAX_OPERANDS];
};
549
550static struct arm_it inst;
551
/* Number of entries in fp_const / rows in fp_values below.  */
#define NUM_FLOAT_VALS 8

/* Well-known floating-point constant spellings; the list is
   zero-terminated.  */
const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};

/* LITTLENUM representations of the constants in fp_const.  NOTE(review):
   the initialisation site is not visible in this chunk -- presumably filled
   in during startup.  */
LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
560
561#define FAIL (-1)
562#define SUCCESS (0)
563
564#define SUFF_S 1
565#define SUFF_D 2
566#define SUFF_E 3
567#define SUFF_P 4
568
569#define CP_T_X 0x00008000
570#define CP_T_Y 0x00400000
571
572#define CONDS_BIT 0x00100000
573#define LOAD_BIT 0x00100000
574
575#define DOUBLE_LOAD_FLAG 0x00000001
576
/* Entry in a table mapping a condition-code name to its encoding value
   (see the arm_cond_hsh/arm_vcond_hsh hash tables below).  */
struct asm_cond
{
  const char *	 template_name;
  unsigned long  value;
};

/* Encoding of the "always" condition.  */
#define COND_ALWAYS 0xE

/* Entry in a table mapping a PSR name to its field encoding.  */
struct asm_psr
{
  const char *   template_name;
  unsigned long  field;
};

/* Entry in a table mapping a barrier-option name to its value, together
   with the architecture that provides it.  */
struct asm_barrier_opt
{
  const char *		  template_name;
  unsigned long		  value;
  const arm_feature_set   arch;
};
597
598/* The bit that distinguishes CPSR and SPSR. */
599#define SPSR_BIT (1 << 22)
600
601/* The individual PSR flag bits. */
602#define PSR_c (1 << 16)
603#define PSR_x (1 << 17)
604#define PSR_s (1 << 18)
605#define PSR_f (1 << 19)
606
/* Entry in a table mapping a relocation-suffix name to a BFD relocation
   code.  */
struct reloc_entry
{
  const char *		     name;
  bfd_reloc_code_real_type   reloc;
};

/* Position of a VFP register operand (S or D register, as destination,
   first or second source) within an instruction encoding.  */
enum vfp_reg_pos
{
  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
};

/* Addressing variants of the VFP load/store-multiple instructions.  */
enum vfp_ldstm_type
{
  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
};

/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2

/* Extra information attached to a register alias created with .dn or .qn:
   an optional element type and/or scalar index.  */
struct neon_typed_alias
{
  /* Mask of NTA_* bits indicating which of the fields below are valid.  */
  unsigned char	       defined;
  /* Scalar index (valid when NTA_HASINDEX is set).  */
  unsigned char	       index;
  /* Element type (valid when NTA_HASTYPE is set).  */
  struct neon_type_el  eltype;
};
634
/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  Each entry should have an error message
   in reg_expected_msgs below.  */
enum arm_reg_type
{
  REG_TYPE_RN,		/* Core (general-purpose) register.  */
  REG_TYPE_CP,		/* Coprocessor number.  */
  REG_TYPE_CN,		/* Coprocessor register.  */
  REG_TYPE_FN,		/* FPA register.  */
  REG_TYPE_VFS,		/* VFP single-precision register.  */
  REG_TYPE_VFD,		/* VFP/Neon double-precision register.  */
  REG_TYPE_NQ,		/* Neon quad-precision register.  */
  REG_TYPE_VFSD,	/* VFP single or double precision register.  */
  REG_TYPE_NDQ,		/* Neon double or quad precision register.  */
  REG_TYPE_NSD,		/* Neon single or double precision register.  */
  REG_TYPE_NSDQ,	/* VFP single, double or Neon quad precision.  */
  REG_TYPE_VFC,		/* VFP system register.  */
  REG_TYPE_MVF,		/* Maverick MVF register.  */
  REG_TYPE_MVD,		/* Maverick MVD register.  */
  REG_TYPE_MVFX,	/* Maverick MVFX register.  */
  REG_TYPE_MVDX,	/* Maverick MVDX register.  */
  REG_TYPE_MVAX,	/* Maverick MVAX register.  */
  REG_TYPE_MQ,		/* MVE vector register.  */
  REG_TYPE_DSPSC,	/* Maverick DSPSC register.  */
  REG_TYPE_MMXWR,	/* iWMMXt data register.  */
  REG_TYPE_MMXWC,	/* iWMMXt control register.  */
  REG_TYPE_MMXWCG,	/* iWMMXt scalar register.  */
  REG_TYPE_XSCALE,	/* XScale accumulator register.  */
  REG_TYPE_RNB,		/* NOTE(review): has an empty diagnostic string below;
			   presumably a banked core register -- confirm.  */
  REG_TYPE_ZR		/* ZR (zero) register.  */
};
666
/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
struct reg_entry
{
  /* Register name as written in assembly source.  */
  const char *		     name;
  /* Register number/encoding.  */
  unsigned int		     number;
  /* An enum arm_reg_type value.  */
  unsigned char		     type;
  /* NOTE(review): presumably nonzero for predefined registers as opposed
     to user-created aliases -- confirm at the alias-creation code.  */
  unsigned char		     builtin;
  struct neon_typed_alias *  neon;
};
679
680/* Diagnostics used when we don't get a register of the expected type. */
681const char * const reg_expected_msgs[] =
682{
683 [REG_TYPE_RN] = N_("ARM register expected"),
684 [REG_TYPE_CP] = N_("bad or missing co-processor number"),
685 [REG_TYPE_CN] = N_("co-processor register expected"),
686 [REG_TYPE_FN] = N_("FPA register expected"),
687 [REG_TYPE_VFS] = N_("VFP single precision register expected"),
688 [REG_TYPE_VFD] = N_("VFP/Neon double precision register expected"),
689 [REG_TYPE_NQ] = N_("Neon quad precision register expected"),
690 [REG_TYPE_VFSD] = N_("VFP single or double precision register expected"),
691 [REG_TYPE_NDQ] = N_("Neon double or quad precision register expected"),
692 [REG_TYPE_NSD] = N_("Neon single or double precision register expected"),
693 [REG_TYPE_NSDQ] = N_("VFP single, double or Neon quad precision register"
694 " expected"),
695 [REG_TYPE_VFC] = N_("VFP system register expected"),
696 [REG_TYPE_MVF] = N_("Maverick MVF register expected"),
697 [REG_TYPE_MVD] = N_("Maverick MVD register expected"),
698 [REG_TYPE_MVFX] = N_("Maverick MVFX register expected"),
699 [REG_TYPE_MVDX] = N_("Maverick MVDX register expected"),
700 [REG_TYPE_MVAX] = N_("Maverick MVAX register expected"),
701 [REG_TYPE_DSPSC] = N_("Maverick DSPSC register expected"),
702 [REG_TYPE_MMXWR] = N_("iWMMXt data register expected"),
703 [REG_TYPE_MMXWC] = N_("iWMMXt control register expected"),
704 [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
705 [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
706 [REG_TYPE_MQ] = N_("MVE vector register expected"),
707 [REG_TYPE_RNB] = N_("")
708};
709
710/* Some well known registers that we refer to directly elsewhere. */
711#define REG_R12 12
712#define REG_SP 13
713#define REG_LR 14
714#define REG_PC 15
715
716/* ARM instructions take 4bytes in the object file, Thumb instructions
717 take 2: */
718#define INSN_SIZE 4
719
/* An entry in the assembler's opcode table (hashed into arm_ops_hsh).  */
struct asm_opcode
{
  /* Basic string to match.  */
  const char * template_name;

  /* Parameters to instruction.  */
  unsigned int operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue;

  /* Thumb-format instruction code.  */
  unsigned int tvalue;

  /* Which architecture variant provides this instruction.
     AVARIANT covers the ARM encoding, TVARIANT the Thumb encoding.  */
  const arm_feature_set * avariant;
  const arm_feature_set * tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);

  /* Indicates whether this instruction may be vector predicated.  */
  unsigned int mayBeVecPred : 1;
};
750
751/* Defines for various bits that we will want to toggle. */
752#define INST_IMMEDIATE 0x02000000
753#define OFFSET_REG 0x02000000
754#define HWOFFSET_IMM 0x00400000
755#define SHIFT_BY_REG 0x00000010
756#define PRE_INDEX 0x01000000
757#define INDEX_UP 0x00800000
758#define WRITE_BACK 0x00200000
759#define LDM_TYPE_2_OR_3 0x00400000
760#define CPSI_MMOD 0x00020000
761
762#define LITERAL_MASK 0xf000f000
763#define OPCODE_MASK 0xfe1fffff
764#define V4_STR_BIT 0x00000020
765#define VLDR_VMOV_SAME 0x0040f000
766
767#define T2_SUBS_PC_LR 0xf3de8f00
768
769#define DATA_OP_SHIFT 21
770#define SBIT_SHIFT 20
771
772#define T2_OPCODE_MASK 0xfe1fffff
773#define T2_DATA_OP_SHIFT 21
774#define T2_SBIT_SHIFT 20
775
776#define A_COND_MASK 0xf0000000
777#define A_PUSH_POP_OP_MASK 0x0fff0000
778
 779/* Opcodes for pushing/popping registers to/from the stack. */
780#define A1_OPCODE_PUSH 0x092d0000
781#define A2_OPCODE_PUSH 0x052d0004
782#define A2_OPCODE_POP 0x049d0004
783
784/* Codes to distinguish the arithmetic instructions. */
785#define OPCODE_AND 0
786#define OPCODE_EOR 1
787#define OPCODE_SUB 2
788#define OPCODE_RSB 3
789#define OPCODE_ADD 4
790#define OPCODE_ADC 5
791#define OPCODE_SBC 6
792#define OPCODE_RSC 7
793#define OPCODE_TST 8
794#define OPCODE_TEQ 9
795#define OPCODE_CMP 10
796#define OPCODE_CMN 11
797#define OPCODE_ORR 12
798#define OPCODE_MOV 13
799#define OPCODE_BIC 14
800#define OPCODE_MVN 15
801
802#define T2_OPCODE_AND 0
803#define T2_OPCODE_BIC 1
804#define T2_OPCODE_ORR 2
805#define T2_OPCODE_ORN 3
806#define T2_OPCODE_EOR 4
807#define T2_OPCODE_ADD 8
808#define T2_OPCODE_ADC 10
809#define T2_OPCODE_SBC 11
810#define T2_OPCODE_SUB 13
811#define T2_OPCODE_RSB 14
812
813#define T_OPCODE_MUL 0x4340
814#define T_OPCODE_TST 0x4200
815#define T_OPCODE_CMN 0x42c0
816#define T_OPCODE_NEG 0x4240
817#define T_OPCODE_MVN 0x43c0
818
819#define T_OPCODE_ADD_R3 0x1800
820#define T_OPCODE_SUB_R3 0x1a00
821#define T_OPCODE_ADD_HI 0x4400
822#define T_OPCODE_ADD_ST 0xb000
823#define T_OPCODE_SUB_ST 0xb080
824#define T_OPCODE_ADD_SP 0xa800
825#define T_OPCODE_ADD_PC 0xa000
826#define T_OPCODE_ADD_I8 0x3000
827#define T_OPCODE_SUB_I8 0x3800
828#define T_OPCODE_ADD_I3 0x1c00
829#define T_OPCODE_SUB_I3 0x1e00
830
831#define T_OPCODE_ASR_R 0x4100
832#define T_OPCODE_LSL_R 0x4080
833#define T_OPCODE_LSR_R 0x40c0
834#define T_OPCODE_ROR_R 0x41c0
835#define T_OPCODE_ASR_I 0x1000
836#define T_OPCODE_LSL_I 0x0000
837#define T_OPCODE_LSR_I 0x0800
838
839#define T_OPCODE_MOV_I8 0x2000
840#define T_OPCODE_CMP_I8 0x2800
841#define T_OPCODE_CMP_LR 0x4280
842#define T_OPCODE_MOV_HR 0x4600
843#define T_OPCODE_CMP_HR 0x4500
844
845#define T_OPCODE_LDR_PC 0x4800
846#define T_OPCODE_LDR_SP 0x9800
847#define T_OPCODE_STR_SP 0x9000
848#define T_OPCODE_LDR_IW 0x6800
849#define T_OPCODE_STR_IW 0x6000
850#define T_OPCODE_LDR_IH 0x8800
851#define T_OPCODE_STR_IH 0x8000
852#define T_OPCODE_LDR_IB 0x7800
853#define T_OPCODE_STR_IB 0x7000
854#define T_OPCODE_LDR_RW 0x5800
855#define T_OPCODE_STR_RW 0x5000
856#define T_OPCODE_LDR_RH 0x5a00
857#define T_OPCODE_STR_RH 0x5200
858#define T_OPCODE_LDR_RB 0x5c00
859#define T_OPCODE_STR_RB 0x5400
860
861#define T_OPCODE_PUSH 0xb400
862#define T_OPCODE_POP 0xbc00
863
864#define T_OPCODE_BRANCH 0xe000
865
866#define THUMB_SIZE 2 /* Size of thumb instruction. */
867#define THUMB_PP_PC_LR 0x0100
868#define THUMB_LOAD_BIT 0x0800
869#define THUMB2_LOAD_BIT 0x00100000
870
871#define BAD_SYNTAX _("syntax error")
872#define BAD_ARGS _("bad arguments to instruction")
873#define BAD_SP _("r13 not allowed here")
874#define BAD_PC _("r15 not allowed here")
875#define BAD_ODD _("Odd register not allowed here")
876#define BAD_EVEN _("Even register not allowed here")
877#define BAD_COND _("instruction cannot be conditional")
878#define BAD_OVERLAP _("registers may not be the same")
879#define BAD_HIREG _("lo register required")
880#define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
881#define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
882#define BAD_BRANCH _("branch must be last instruction in IT block")
883#define BAD_BRANCH_OFF _("branch out of range or not a multiple of 2")
884#define BAD_NOT_IT _("instruction not allowed in IT block")
885#define BAD_NOT_VPT _("instruction missing MVE vector predication code")
886#define BAD_FPU _("selected FPU does not support instruction")
887#define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
888#define BAD_OUT_VPT \
889 _("vector predicated instruction should be in VPT/VPST block")
890#define BAD_IT_COND _("incorrect condition in IT block")
891#define BAD_VPT_COND _("incorrect condition in VPT/VPST block")
892#define BAD_IT_IT _("IT falling in the range of a previous IT block")
893#define MISSING_FNSTART _("missing .fnstart before unwinding directive")
894#define BAD_PC_ADDRESSING \
895 _("cannot use register index with PC-relative addressing")
896#define BAD_PC_WRITEBACK \
897 _("cannot use writeback with PC-relative addressing")
898#define BAD_RANGE _("branch out of range")
899#define BAD_FP16 _("selected processor does not support fp16 instruction")
900#define BAD_BF16 _("selected processor does not support bf16 instruction")
901#define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
902#define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
903#define MVE_NOT_IT _("Warning: instruction is UNPREDICTABLE in an IT " \
904 "block")
905#define MVE_NOT_VPT _("Warning: instruction is UNPREDICTABLE in a VPT " \
906 "block")
907#define MVE_BAD_PC _("Warning: instruction is UNPREDICTABLE with PC" \
908 " operand")
909#define MVE_BAD_SP _("Warning: instruction is UNPREDICTABLE with SP" \
910 " operand")
911#define BAD_SIMD_TYPE _("bad type in SIMD instruction")
912#define BAD_MVE_AUTO \
913 _("GAS auto-detection mode and -march=all is deprecated for MVE, please" \
914 " use a valid -march or -mcpu option.")
915#define BAD_MVE_SRCDEST _("Warning: 32-bit element size and same destination "\
916 "and source operands makes instruction UNPREDICTABLE")
917#define BAD_EL_TYPE _("bad element type for instruction")
918#define MVE_BAD_QREG _("MVE vector register Q[0..7] expected")
919
920static struct hash_control * arm_ops_hsh;
921static struct hash_control * arm_cond_hsh;
922static struct hash_control * arm_vcond_hsh;
923static struct hash_control * arm_shift_hsh;
924static struct hash_control * arm_psr_hsh;
925static struct hash_control * arm_v7m_psr_hsh;
926static struct hash_control * arm_reg_hsh;
927static struct hash_control * arm_reloc_hsh;
928static struct hash_control * arm_barrier_opt_hsh;
929
930/* Stuff needed to resolve the label ambiguity
931 As:
932 ...
933 label: <insn>
934 may differ from:
935 ...
936 label:
937 <insn> */
938
939symbolS * last_label_seen;
940static int label_is_thumb_function_name = FALSE;
941
942/* Literal pool structure. Held on a per-section
943 and per-sub-section basis. */
944
945#define MAX_LITERAL_POOL_SIZE 1024
946typedef struct literal_pool
947{
948 expressionS literals [MAX_LITERAL_POOL_SIZE];
949 unsigned int next_free_entry;
950 unsigned int id;
951 symbolS * symbol;
952 segT section;
953 subsegT sub_section;
954#ifdef OBJ_ELF
955 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
956#endif
957 struct literal_pool * next;
958 unsigned int alignment;
959} literal_pool;
960
961/* Pointer to a linked list of literal pools. */
962literal_pool * list_of_pools = NULL;
963
964typedef enum asmfunc_states
965{
966 OUTSIDE_ASMFUNC,
967 WAITING_ASMFUNC_NAME,
968 WAITING_ENDASMFUNC
969} asmfunc_states;
970
971static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
972
973#ifdef OBJ_ELF
974# define now_pred seg_info (now_seg)->tc_segment_info_data.current_pred
975#else
976static struct current_pred now_pred;
977#endif
978
979static inline int
980now_pred_compatible (int cond)
981{
982 return (cond & ~1) == (now_pred.cc & ~1);
983}
984
985static inline int
986conditional_insn (void)
987{
988 return inst.cond != COND_ALWAYS;
989}
990
991static int in_pred_block (void);
992
993static int handle_pred_state (void);
994
995static void force_automatic_it_block_close (void);
996
997static void it_fsm_post_encode (void);
998
999#define set_pred_insn_type(type) \
1000 do \
1001 { \
1002 inst.pred_insn_type = type; \
1003 if (handle_pred_state () == FAIL) \
1004 return; \
1005 } \
1006 while (0)
1007
1008#define set_pred_insn_type_nonvoid(type, failret) \
1009 do \
1010 { \
1011 inst.pred_insn_type = type; \
1012 if (handle_pred_state () == FAIL) \
1013 return failret; \
1014 } \
1015 while(0)
1016
1017#define set_pred_insn_type_last() \
1018 do \
1019 { \
1020 if (inst.cond == COND_ALWAYS) \
1021 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN); \
1022 else \
1023 set_pred_insn_type (INSIDE_IT_LAST_INSN); \
1024 } \
1025 while (0)
1026
/* Toggle bit POS of VALUE.  Arguments are fully parenthesized so the
   macro expands correctly inside larger expressions (e.g. an argument
   containing `&', which binds less tightly than `^').  */
#define TOGGLE_BIT(value, pos) ((value) ^ (1 << (pos)))
1029
1030/* Pure syntax. */
1031
1032/* This array holds the chars that always start a comment. If the
1033 pre-processor is disabled, these aren't very useful. */
1034char arm_comment_chars[] = "@";
1035
1036/* This array holds the chars that only start a comment at the beginning of
1037 a line. If the line seems to have the form '# 123 filename'
1038 .line and .file directives will appear in the pre-processed output. */
1039/* Note that input_file.c hand checks for '#' at the beginning of the
1040 first line of the input file. This is because the compiler outputs
1041 #NO_APP at the beginning of its output. */
1042/* Also note that comments like this one will always work. */
1043const char line_comment_chars[] = "#";
1044
1045char arm_line_separator_chars[] = ";";
1046
1047/* Chars that can be used to separate mant
1048 from exp in floating point numbers. */
1049const char EXP_CHARS[] = "eE";
1050
1051/* Chars that mean this number is a floating point constant. */
1052/* As in 0f12.456 */
1053/* or 0d1.2345e12 */
1054
1055const char FLT_CHARS[] = "rRsSfFdDxXeEpPHh";
1056
1057/* Prefix characters that indicate the start of an immediate
1058 value. */
1059#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
1060
1061/* Separator character handling. */
1062
1063#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
1064
1065enum fp_16bit_format
1066{
1067 ARM_FP16_FORMAT_IEEE = 0x1,
1068 ARM_FP16_FORMAT_ALTERNATIVE = 0x2,
1069 ARM_FP16_FORMAT_DEFAULT = 0x3
1070};
1071
1072static enum fp_16bit_format fp16_format = ARM_FP16_FORMAT_DEFAULT;
1073
1074
1075static inline int
1076skip_past_char (char ** str, char c)
1077{
1078 /* PR gas/14987: Allow for whitespace before the expected character. */
1079 skip_whitespace (*str);
1080
1081 if (**str == c)
1082 {
1083 (*str)++;
1084 return SUCCESS;
1085 }
1086 else
1087 return FAIL;
1088}
1089
1090#define skip_past_comma(str) skip_past_char (str, ',')
1091
1092/* Arithmetic expressions (possibly involving symbols). */
1093
1094/* Return TRUE if anything in the expression is a bignum. */
1095
1096static bfd_boolean
1097walk_no_bignums (symbolS * sp)
1098{
1099 if (symbol_get_value_expression (sp)->X_op == O_big)
1100 return TRUE;
1101
1102 if (symbol_get_value_expression (sp)->X_add_symbol)
1103 {
1104 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
1105 || (symbol_get_value_expression (sp)->X_op_symbol
1106 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
1107 }
1108
1109 return FALSE;
1110}
1111
1112static bfd_boolean in_my_get_expression = FALSE;
1113
1114/* Third argument to my_get_expression. */
1115#define GE_NO_PREFIX 0
1116#define GE_IMM_PREFIX 1
1117#define GE_OPT_PREFIX 2
1118/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
1119 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
1120#define GE_OPT_PREFIX_BIG 3
1121
/* Parse an expression at *STR into *EP, honouring PREFIX_MODE (one of
   the GE_* constants above) for the optional '#'/'$' immediate prefix.
   Returns SUCCESS (0) on success; on failure inst.error is set and a
   non-zero value is returned.  NOTE(review): the failure paths return
   both 1 and FAIL (-1); callers appear to test only for non-zero.
   On return *STR points past the text consumed.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  /* Consume or require the immediate prefix as requested.  */
  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Run the generic expression parser over our string.  The
     in_my_get_expression flag lets md_operand () mark unparseable
     operands as O_illegal so we can report them here with instruction
     context.  input_line_pointer is saved and restored around the
     call.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = TRUE;
  expression (ep);
  in_my_get_expression = FALSE;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return SUCCESS;
}
1191
1192/* Turn a string in input_line_pointer into a floating point constant
1193 of type TYPE, and store the appropriate bytes in *LITP. The number
1194 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1195 returned, or NULL on OK.
1196
1197 Note that fp constants aren't represent in the normal way on the ARM.
1198 In big endian mode, things are as expected. However, in little endian
1199 mode fp constants are big-endian word-wise, and little-endian byte-wise
1200 within the words. For example, (double) 1.1 in big endian mode is
1201 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1202 the byte sequence 99 99 f1 3f 9a 99 99 99.
1203
1204 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1205
1206const char *
1207md_atof (int type, char * litP, int * sizeP)
1208{
1209 int prec;
1210 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1211 char *t;
1212 int i;
1213
1214 switch (type)
1215 {
1216 case 'H':
1217 case 'h':
1218 prec = 1;
1219 break;
1220
1221 case 'f':
1222 case 'F':
1223 case 's':
1224 case 'S':
1225 prec = 2;
1226 break;
1227
1228 case 'd':
1229 case 'D':
1230 case 'r':
1231 case 'R':
1232 prec = 4;
1233 break;
1234
1235 case 'x':
1236 case 'X':
1237 prec = 5;
1238 break;
1239
1240 case 'p':
1241 case 'P':
1242 prec = 5;
1243 break;
1244
1245 default:
1246 *sizeP = 0;
1247 return _("Unrecognized or unsupported floating point constant");
1248 }
1249
1250 t = atof_ieee (input_line_pointer, type, words);
1251 if (t)
1252 input_line_pointer = t;
1253 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1254
1255 if (target_big_endian || prec == 1)
1256 for (i = 0; i < prec; i++)
1257 {
1258 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1259 litP += sizeof (LITTLENUM_TYPE);
1260 }
1261 else if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1262 for (i = prec - 1; i >= 0; i--)
1263 {
1264 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1265 litP += sizeof (LITTLENUM_TYPE);
1266 }
1267 else
1268 /* For a 4 byte float the order of elements in `words' is 1 0.
1269 For an 8 byte float the order is 1 0 3 2. */
1270 for (i = 0; i < prec; i += 2)
1271 {
1272 md_number_to_chars (litP, (valueT) words[i + 1],
1273 sizeof (LITTLENUM_TYPE));
1274 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1275 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1276 litP += 2 * sizeof (LITTLENUM_TYPE);
1277 }
1278
1279 return NULL;
1280}
1281
1282/* We handle all bad expressions here, so that we can report the faulty
1283 instruction in the error message. */
1284
1285void
1286md_operand (expressionS * exp)
1287{
1288 if (in_my_get_expression)
1289 exp->X_op = O_illegal;
1290}
1291
1292/* Immediate values. */
1293
1294#ifdef OBJ_ELF
1295/* Generic immediate-value read function for use in directives.
1296 Accepts anything that 'expression' can fold to a constant.
1297 *val receives the number. */
1298
1299static int
1300immediate_for_directive (int *val)
1301{
1302 expressionS exp;
1303 exp.X_op = O_illegal;
1304
1305 if (is_immediate_prefix (*input_line_pointer))
1306 {
1307 input_line_pointer++;
1308 expression (&exp);
1309 }
1310
1311 if (exp.X_op != O_constant)
1312 {
1313 as_bad (_("expected #constant"));
1314 ignore_rest_of_line ();
1315 return FAIL;
1316 }
1317 *val = exp.X_add_number;
1318 return SUCCESS;
1319}
1320#endif
1321
1322/* Register parsing. */
1323
1324/* Generic register parser. CCP points to what should be the
1325 beginning of a register name. If it is indeed a valid register
1326 name, advance CCP over it and return the reg_entry structure;
1327 otherwise return NULL. Does not issue diagnostics. */
1328
1329static struct reg_entry *
1330arm_reg_parse_multi (char **ccp)
1331{
1332 char *start = *ccp;
1333 char *p;
1334 struct reg_entry *reg;
1335
1336 skip_whitespace (start);
1337
1338#ifdef REGISTER_PREFIX
1339 if (*start != REGISTER_PREFIX)
1340 return NULL;
1341 start++;
1342#endif
1343#ifdef OPTIONAL_REGISTER_PREFIX
1344 if (*start == OPTIONAL_REGISTER_PREFIX)
1345 start++;
1346#endif
1347
1348 p = start;
1349 if (!ISALPHA (*p) || !is_name_beginner (*p))
1350 return NULL;
1351
1352 do
1353 p++;
1354 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1355
1356 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1357
1358 if (!reg)
1359 return NULL;
1360
1361 *ccp = p;
1362 return reg;
1363}
1364
1365static int
1366arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1367 enum arm_reg_type type)
1368{
1369 /* Alternative syntaxes are accepted for a few register classes. */
1370 switch (type)
1371 {
1372 case REG_TYPE_MVF:
1373 case REG_TYPE_MVD:
1374 case REG_TYPE_MVFX:
1375 case REG_TYPE_MVDX:
1376 /* Generic coprocessor register names are allowed for these. */
1377 if (reg && reg->type == REG_TYPE_CN)
1378 return reg->number;
1379 break;
1380
1381 case REG_TYPE_CP:
1382 /* For backward compatibility, a bare number is valid here. */
1383 {
1384 unsigned long processor = strtoul (start, ccp, 10);
1385 if (*ccp != start && processor <= 15)
1386 return processor;
1387 }
1388 /* Fall through. */
1389
1390 case REG_TYPE_MMXWC:
1391 /* WC includes WCG. ??? I'm not sure this is true for all
1392 instructions that take WC registers. */
1393 if (reg && reg->type == REG_TYPE_MMXWCG)
1394 return reg->number;
1395 break;
1396
1397 default:
1398 break;
1399 }
1400
1401 return FAIL;
1402}
1403
1404/* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1405 return value is the register number or FAIL. */
1406
1407static int
1408arm_reg_parse (char **ccp, enum arm_reg_type type)
1409{
1410 char *start = *ccp;
1411 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1412 int ret;
1413
1414 /* Do not allow a scalar (reg+index) to parse as a register. */
1415 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1416 return FAIL;
1417
1418 if (reg && reg->type == type)
1419 return reg->number;
1420
1421 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1422 return ret;
1423
1424 *ccp = start;
1425 return FAIL;
1426}
1427
1428/* Parse a Neon type specifier. *STR should point at the leading '.'
1429 character. Does no verification at this stage that the type fits the opcode
1430 properly. E.g.,
1431
1432 .i32.i32.s16
1433 .s32.f32
1434 .u16
1435
1436 Can all be legally parsed by this function.
1437
1438 Fills in neon_type struct pointer with parsed information, and updates STR
1439 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1440 type, FAIL if not. */
1441
1442static int
1443parse_neon_type (struct neon_type *type, char **str)
1444{
1445 char *ptr = *str;
1446
1447 if (type)
1448 type->elems = 0;
1449
1450 while (type->elems < NEON_MAX_TYPE_ELS)
1451 {
1452 enum neon_el_type thistype = NT_untyped;
1453 unsigned thissize = -1u;
1454
1455 if (*ptr != '.')
1456 break;
1457
1458 ptr++;
1459
1460 /* Just a size without an explicit type. */
1461 if (ISDIGIT (*ptr))
1462 goto parsesize;
1463
1464 switch (TOLOWER (*ptr))
1465 {
1466 case 'i': thistype = NT_integer; break;
1467 case 'f': thistype = NT_float; break;
1468 case 'p': thistype = NT_poly; break;
1469 case 's': thistype = NT_signed; break;
1470 case 'u': thistype = NT_unsigned; break;
1471 case 'd':
1472 thistype = NT_float;
1473 thissize = 64;
1474 ptr++;
1475 goto done;
1476 case 'b':
1477 thistype = NT_bfloat;
1478 switch (TOLOWER (*(++ptr)))
1479 {
1480 case 'f':
1481 ptr += 1;
1482 thissize = strtoul (ptr, &ptr, 10);
1483 if (thissize != 16)
1484 {
1485 as_bad (_("bad size %d in type specifier"), thissize);
1486 return FAIL;
1487 }
1488 goto done;
1489 case '0': case '1': case '2': case '3': case '4':
1490 case '5': case '6': case '7': case '8': case '9':
1491 case ' ': case '.':
1492 as_bad (_("unexpected type character `b' -- did you mean `bf'?"));
1493 return FAIL;
1494 default:
1495 break;
1496 }
1497 break;
1498 default:
1499 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1500 return FAIL;
1501 }
1502
1503 ptr++;
1504
1505 /* .f is an abbreviation for .f32. */
1506 if (thistype == NT_float && !ISDIGIT (*ptr))
1507 thissize = 32;
1508 else
1509 {
1510 parsesize:
1511 thissize = strtoul (ptr, &ptr, 10);
1512
1513 if (thissize != 8 && thissize != 16 && thissize != 32
1514 && thissize != 64)
1515 {
1516 as_bad (_("bad size %d in type specifier"), thissize);
1517 return FAIL;
1518 }
1519 }
1520
1521 done:
1522 if (type)
1523 {
1524 type->el[type->elems].type = thistype;
1525 type->el[type->elems].size = thissize;
1526 type->elems++;
1527 }
1528 }
1529
1530 /* Empty/missing type is not a successful parse. */
1531 if (type->elems == 0)
1532 return FAIL;
1533
1534 *str = ptr;
1535
1536 return SUCCESS;
1537}
1538
1539/* Errors may be set multiple times during parsing or bit encoding
1540 (particularly in the Neon bits), but usually the earliest error which is set
1541 will be the most meaningful. Avoid overwriting it with later (cascading)
1542 errors by calling this function. */
1543
1544static void
1545first_error (const char *err)
1546{
1547 if (!inst.error)
1548 inst.error = err;
1549}
1550
1551/* Parse a single type, e.g. ".s32", leading period included. */
1552static int
1553parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1554{
1555 char *str = *ccp;
1556 struct neon_type optype;
1557
1558 if (*str == '.')
1559 {
1560 if (parse_neon_type (&optype, &str) == SUCCESS)
1561 {
1562 if (optype.elems == 1)
1563 *vectype = optype.el[0];
1564 else
1565 {
1566 first_error (_("only one type should be specified for operand"));
1567 return FAIL;
1568 }
1569 }
1570 else
1571 {
1572 first_error (_("vector type expected"));
1573 return FAIL;
1574 }
1575 }
1576 else
1577 return FAIL;
1578
1579 *ccp = str;
1580
1581 return SUCCESS;
1582}
1583
1584/* Special meanings for indices (which have a range of 0-7), which will fit into
1585 a 4-bit integer. */
1586
1587#define NEON_ALL_LANES 15
1588#define NEON_INTERLEAVE_LANES 14
1589
1590/* Record a use of the given feature. */
1591static void
1592record_feature_use (const arm_feature_set *feature)
1593{
1594 if (thumb_mode)
1595 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
1596 else
1597 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
1598}
1599
1600/* If the given feature available in the selected CPU, mark it as used.
1601 Returns TRUE iff feature is available. */
1602static bfd_boolean
1603mark_feature_used (const arm_feature_set *feature)
1604{
1605
1606 /* Do not support the use of MVE only instructions when in auto-detection or
1607 -march=all. */
1608 if (((feature == &mve_ext) || (feature == &mve_fp_ext))
1609 && ARM_CPU_IS_ANY (cpu_variant))
1610 {
1611 first_error (BAD_MVE_AUTO);
1612 return FALSE;
1613 }
1614 /* Ensure the option is valid on the current architecture. */
1615 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
1616 return FALSE;
1617
1618 /* Add the appropriate architecture feature for the barrier option used.
1619 */
1620 record_feature_use (feature);
1621
1622 return TRUE;
1623}
1624
1625/* Parse either a register or a scalar, with an optional type. Return the
1626 register number, and optionally fill in the actual type of the register
1627 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1628 type/index information in *TYPEINFO. */
1629
/* Parse a register, or a register plus scalar index, at *CCP.  TYPE is
   the (possibly polymorphic) register class expected; the resolved
   class is stored through RTYPE and the parsed type/index information
   through TYPEINFO (both may be NULL).  Returns the register number or
   FAIL; *CCP is advanced only on success.  */
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with "nothing defined": no type, no index.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_NSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  /* An MVE vector register (Q0..Q7) is a restricted view of a Neon Q
     register; Q8..Q15 additionally require the D32 feature.  */
  if (type == REG_TYPE_MQ)
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	return FAIL;

      if (!reg || reg->type != REG_TYPE_NQ)
	return FAIL;

      if (reg->number > 14 && !mark_feature_used (&fpu_vfp_ext_d32))
	{
	  first_error (_("expected MVE register [q0..q7]"));
	  return FAIL;
	}
      type = REG_TYPE_NQ;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (type == REG_TYPE_NQ))
    return FAIL;


  if (type != reg->type)
    return FAIL;

  /* A register alias may carry pre-defined type/index information.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix overrides nothing: it may only be given
     when the alias did not already fix the type.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional scalar index "[n]" or all-lanes "[]".  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD
	  && !(type == REG_TYPE_VFS
	       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2))
	  && !(type == REG_TYPE_NQ
	       && ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)))
	{
	  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	    first_error (_("only D and Q registers may be indexed"));
	  else
	    first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* "[]" selects all lanes; otherwise a constant index is
	 required.  */
      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1762
1763/* Like arm_reg_parse, but also allow the following extra features:
1764 - If RTYPE is non-zero, return the (possibly restricted) type of the
1765 register (e.g. Neon double or quad reg when either has been requested).
1766 - If this is a Neon vector type with additional type information, fill
1767 in the struct pointed to by VECTYPE (if non-NULL).
1768 This function will fault on encountering a scalar. */
1769
1770static int
1771arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1772 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1773{
1774 struct neon_typed_alias atype;
1775 char *str = *ccp;
1776 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1777
1778 if (reg == FAIL)
1779 return FAIL;
1780
1781 /* Do not allow regname(... to parse as a register. */
1782 if (*str == '(')
1783 return FAIL;
1784
1785 /* Do not allow a scalar (reg+index) to parse as a register. */
1786 if ((atype.defined & NTA_HASINDEX) != 0)
1787 {
1788 first_error (_("register operand expected, but got scalar"));
1789 return FAIL;
1790 }
1791
1792 if (vectype)
1793 *vectype = atype.eltype;
1794
1795 *ccp = str;
1796
1797 return reg;
1798}
1799
1800#define NEON_SCALAR_REG(X) ((X) >> 4)
1801#define NEON_SCALAR_INDEX(X) ((X) & 15)
1802
1803/* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1804 have enough information to be able to do a good job bounds-checking. So, we
1805 just do easy checks here, and do further checks later. */
1806
1807static int
1808parse_scalar (char **ccp, int elsize, struct neon_type_el *type, enum
1809 arm_reg_type reg_type)
1810{
1811 int reg;
1812 char *str = *ccp;
1813 struct neon_typed_alias atype;
1814 unsigned reg_size;
1815
1816 reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);
1817
1818 switch (reg_type)
1819 {
1820 case REG_TYPE_VFS:
1821 reg_size = 32;
1822 break;
1823 case REG_TYPE_VFD:
1824 reg_size = 64;
1825 break;
1826 case REG_TYPE_MQ:
1827 reg_size = 128;
1828 break;
1829 default:
1830 gas_assert (0);
1831 return FAIL;
1832 }
1833
1834 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1835 return FAIL;
1836
1837 if (reg_type != REG_TYPE_MQ && atype.index == NEON_ALL_LANES)
1838 {
1839 first_error (_("scalar must have an index"));
1840 return FAIL;
1841 }
1842 else if (atype.index >= reg_size / elsize)
1843 {
1844 first_error (_("scalar index out of range"));
1845 return FAIL;
1846 }
1847
1848 if (type)
1849 *type = atype.eltype;
1850
1851 *ccp = str;
1852
1853 return reg * 16 + atype.index;
1854}
1855
1856/* Types of registers in a list. */
1857
1858enum reg_list_els
1859{
1860 REGLIST_RN,
1861 REGLIST_CLRM,
1862 REGLIST_VFP_S,
1863 REGLIST_VFP_S_VPR,
1864 REGLIST_VFP_D,
1865 REGLIST_VFP_D_VPR,
1866 REGLIST_NEON_D
1867};
1868
1869/* Parse an ARM register list. Returns the bitmask, or FAIL. */
1870
1871static long
1872parse_reg_list (char ** strp, enum reg_list_els etype)
1873{
1874 char *str = *strp;
1875 long range = 0;
1876 int another_range;
1877
1878 gas_assert (etype == REGLIST_RN || etype == REGLIST_CLRM);
1879
1880 /* We come back here if we get ranges concatenated by '+' or '|'. */
1881 do
1882 {
1883 skip_whitespace (str);
1884
1885 another_range = 0;
1886
1887 if (*str == '{')
1888 {
1889 int in_range = 0;
1890 int cur_reg = -1;
1891
1892 str++;
1893 do
1894 {
1895 int reg;
1896 const char apsr_str[] = "apsr";
1897 int apsr_str_len = strlen (apsr_str);
1898
1899 reg = arm_reg_parse (&str, REGLIST_RN);
1900 if (etype == REGLIST_CLRM)
1901 {
1902 if (reg == REG_SP || reg == REG_PC)
1903 reg = FAIL;
1904 else if (reg == FAIL
1905 && !strncasecmp (str, apsr_str, apsr_str_len)
1906 && !ISALPHA (*(str + apsr_str_len)))
1907 {
1908 reg = 15;
1909 str += apsr_str_len;
1910 }
1911
1912 if (reg == FAIL)
1913 {
1914 first_error (_("r0-r12, lr or APSR expected"));
1915 return FAIL;
1916 }
1917 }
1918 else /* etype == REGLIST_RN. */
1919 {
1920 if (reg == FAIL)
1921 {
1922 first_error (_(reg_expected_msgs[REGLIST_RN]));
1923 return FAIL;
1924 }
1925 }
1926
1927 if (in_range)
1928 {
1929 int i;
1930
1931 if (reg <= cur_reg)
1932 {
1933 first_error (_("bad range in register list"));
1934 return FAIL;
1935 }
1936
1937 for (i = cur_reg + 1; i < reg; i++)
1938 {
1939 if (range & (1 << i))
1940 as_tsktsk
1941 (_("Warning: duplicated register (r%d) in register list"),
1942 i);
1943 else
1944 range |= 1 << i;
1945 }
1946 in_range = 0;
1947 }
1948
1949 if (range & (1 << reg))
1950 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1951 reg);
1952 else if (reg <= cur_reg)
1953 as_tsktsk (_("Warning: register range not in ascending order"));
1954
1955 range |= 1 << reg;
1956 cur_reg = reg;
1957 }
1958 while (skip_past_comma (&str) != FAIL
1959 || (in_range = 1, *str++ == '-'));
1960 str--;
1961
1962 if (skip_past_char (&str, '}') == FAIL)
1963 {
1964 first_error (_("missing `}'"));
1965 return FAIL;
1966 }
1967 }
1968 else if (etype == REGLIST_RN)
1969 {
1970 expressionS exp;
1971
1972 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1973 return FAIL;
1974
1975 if (exp.X_op == O_constant)
1976 {
1977 if (exp.X_add_number
1978 != (exp.X_add_number & 0x0000ffff))
1979 {
1980 inst.error = _("invalid register mask");
1981 return FAIL;
1982 }
1983
1984 if ((range & exp.X_add_number) != 0)
1985 {
1986 int regno = range & exp.X_add_number;
1987
1988 regno &= -regno;
1989 regno = (1 << regno) - 1;
1990 as_tsktsk
1991 (_("Warning: duplicated register (r%d) in register list"),
1992 regno);
1993 }
1994
1995 range |= exp.X_add_number;
1996 }
1997 else
1998 {
1999 if (inst.relocs[0].type != 0)
2000 {
2001 inst.error = _("expression too complex");
2002 return FAIL;
2003 }
2004
2005 memcpy (&inst.relocs[0].exp, &exp, sizeof (expressionS));
2006 inst.relocs[0].type = BFD_RELOC_ARM_MULTI;
2007 inst.relocs[0].pc_rel = 0;
2008 }
2009 }
2010
2011 if (*str == '|' || *str == '+')
2012 {
2013 str++;
2014 another_range = 1;
2015 }
2016 }
2017 while (another_range);
2018
2019 *strp = str;
2020 return range;
2021}
2022
2023/* Parse a VFP register list. If the string is invalid return FAIL.
2024 Otherwise return the number of registers, and set PBASE to the first
2025 register. Parses registers of type ETYPE.
2026 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
2027 - Q registers can be used to specify pairs of D registers
2028 - { } can be omitted from around a singleton register list
2029 FIXME: This is not implemented, as it would require backtracking in
2030 some cases, e.g.:
2031 vtbl.8 d3,d4,d5
2032 This could be done (the meaning isn't really ambiguous), but doesn't
2033 fit in well with the current parsing framework.
2034 - 32 D registers may be used (also true for VFPv3).
2035 FIXME: Types are ignored in these register lists, which is probably a
2036 bug. */
2037
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype,
		    bfd_boolean *partial_match)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;
  /* TRUE once the trailing VPR pseudo-register has been parsed.  */
  bfd_boolean vpr_seen = FALSE;
  /* The *_VPR list kinds require "vpr" as the final list element.  */
  bfd_boolean expect_vpr =
    (etype == REGLIST_VFP_S_VPR) || (etype == REGLIST_VFP_D_VPR);

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Select the register type being parsed and, for S registers, the
     register-count limit.  D-register limits depend on the FPU and are
     computed below.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
    case REGLIST_VFP_S_VPR:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
    case REGLIST_VFP_D_VPR:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;

    default:
      gas_assert (0);
    }

  if (etype != REGLIST_VFP_S && etype != REGLIST_VFP_S_VPR)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that the D32 extension was actually used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  /* Start above every valid register number so the first register parsed
     becomes the base.  */
  base_reg = max_regs;
  *partial_match = FALSE;

  do
    {
      int setmask = 1, addregs = 1;
      const char vpr_str[] = "vpr";
      int vpr_str_len = strlen (vpr_str);

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (expect_vpr)
	{
	  /* Accept "vpr" at most once, and only as the last element.  */
	  if (new_base == FAIL
	      && !strncasecmp (str, vpr_str, vpr_str_len)
	      && !ISALPHA (*(str + vpr_str_len))
	      && !vpr_seen)
	    {
	      vpr_seen = TRUE;
	      str += vpr_str_len;
	      if (count == 0)
		base_reg = 0; /* Canonicalize VPR only on d0 with 0 regs.  */
	    }
	  else if (vpr_seen)
	    {
	      first_error (_("VPR expected last"));
	      return FAIL;
	    }
	  else if (new_base == FAIL)
	    {
	      if (regtype == REG_TYPE_VFS)
		first_error (_("VFP single precision register or VPR "
			       "expected"));
	      else /* regtype == REG_TYPE_VFD.  */
		first_error (_("VFP/Neon double precision register or VPR "
			       "expected"));
	      return FAIL;
	    }
	}
      else if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      *partial_match = TRUE;
      /* VPR contributes no register bits; go straight to the separator.  */
      if (vpr_seen)
	continue;

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned && !vpr_seen)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Mark every register covered by the range as used.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* Skip the terminating character, presumably the closing '}'.
     NOTE(review): nothing verifies it actually is '}' here -- malformed
     input appears to be left for the caller to reject; confirm.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if ((!vpr_seen && count == 0) || count > max_regs)
    abort ();

  *pbase = base_reg;

  if (expect_vpr && !vpr_seen)
    {
      first_error (_("VPR expected last"));
      return FAIL;
    }

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
2251
2252/* True if two alias types are the same. */
2253
2254static bfd_boolean
2255neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
2256{
2257 if (!a && !b)
2258 return TRUE;
2259
2260 if (!a || !b)
2261 return FALSE;
2262
2263 if (a->defined != b->defined)
2264 return FALSE;
2265
2266 if ((a->defined & NTA_HASTYPE) != 0
2267 && (a->eltype.type != b->eltype.type
2268 || a->eltype.size != b->eltype.size))
2269 return FALSE;
2270
2271 if ((a->defined & NTA_HASINDEX) != 0
2272 && (a->index != b->index))
2273 return FALSE;
2274
2275 return TRUE;
2276}
2277
2278/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2279 The base register is put in *PBASE.
2280 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2281 the return value.
2282 The register stride (minus one) is put in bit 4 of the return value.
2283 Bits [6:5] encode the list length (minus one).
2284 The type of the list elements is put in *ELTYPE, if non-NULL. */
2285
2286#define NEON_LANE(X) ((X) & 0xf)
2287#define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
2288#define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2289
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   int mve,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;
  int count = 0;
  int lane = -1;
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = mve ? _("register stride must be 1") :
    _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  /* A singleton register may appear without the surrounding braces.  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      if (mve)
	rtype = REG_TYPE_MQ;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First element: establish the base register and list type.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second element: its distance from the base fixes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Later elements must continue the established stride.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register the range covers (Q regs count as 2).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* Every indexed element must name the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || (!mve && count > 4)
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as described in the comment above the
     NEON_LANE/NEON_REG_STRIDE/NEON_REGLIST_LENGTH macros.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2450
2451/* Parse an explicit relocation suffix on an expression. This is
2452 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2453 arm_reloc_hsh contains no entries, so this function can only
2454 succeed if there is no () after the word. Returns -1 on error,
2455 BFD_RELOC_UNUSED if there wasn't any suffix. */
2456
2457static int
2458parse_reloc (char **str)
2459{
2460 struct reloc_entry *r;
2461 char *p, *q;
2462
2463 if (**str != '(')
2464 return BFD_RELOC_UNUSED;
2465
2466 p = *str + 1;
2467 q = p;
2468
2469 while (*q && *q != ')' && *q != ',')
2470 q++;
2471 if (*q != ')')
2472 return -1;
2473
2474 if ((r = (struct reloc_entry *)
2475 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2476 return -1;
2477
2478 *str = q + 1;
2479 return r->reloc;
2480}
2481
2482/* Directives: register aliases. */
2483
2484static struct reg_entry *
2485insert_reg_alias (char *str, unsigned number, int type)
2486{
2487 struct reg_entry *new_reg;
2488 const char *name;
2489
2490 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2491 {
2492 if (new_reg->builtin)
2493 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2494
2495 /* Only warn about a redefinition if it's not defined as the
2496 same register. */
2497 else if (new_reg->number != number || new_reg->type != type)
2498 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2499
2500 return NULL;
2501 }
2502
2503 name = xstrdup (str);
2504 new_reg = XNEW (struct reg_entry);
2505
2506 new_reg->name = name;
2507 new_reg->number = number;
2508 new_reg->type = type;
2509 new_reg->builtin = FALSE;
2510 new_reg->neon = NULL;
2511
2512 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2513 abort ();
2514
2515 return new_reg;
2516}
2517
2518static void
2519insert_neon_reg_alias (char *str, int number, int type,
2520 struct neon_typed_alias *atype)
2521{
2522 struct reg_entry *reg = insert_reg_alias (str, number, type);
2523
2524 if (!reg)
2525 {
2526 first_error (_("attempt to redefine typed alias"));
2527 return;
2528 }
2529
2530 if (atype)
2531 {
2532 reg->neon = XNEW (struct neon_typed_alias);
2533 *reg->neon = *atype;
2534 }
2535}
2536
2537/* Look for the .req directive. This is of the form:
2538
2539 new_register_name .req existing_register_name
2540
2541 If we find one, or if it looks sufficiently like one that we want to
2542 handle any error here, return TRUE. Otherwise return FALSE. */
2543
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Still return TRUE: the statement was a .req, so the caller must
	 not process it as anything else.  */
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end. If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      /* NB: `p' is reused below as a cursor over nbuf.  */
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias. We will fail and issue
	     a second, duplicate error message. This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
2616
2617/* Create a Neon typed/indexed register alias using directives, e.g.:
2618 X .dn d5.s32[1]
2619 Y .qn 6.s16
2620 Z .dn d7
2621 T .dn Z[0]
2622 These typed registers can be used instead of the types specified after the
2623 Neon mnemonic, so long as all operands given have types. Types can also be
2624 specified directly, e.g.:
2625 vadd d0.s32, d1.s32, d2.s32 */
2626
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* Recognize the ".dn" (D register) or ".qn" (Q register) forms.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      basereg = &mybasereg;
      /* Q<n> is stored as register number 2*n (a pair of D registers).  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit any existing type/index information from the base alias.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end. If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  /* As with create_register_alias: define the alias as written, then in
     all-uppercase and all-lowercase when those spellings differ.  */
  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2765
2766/* Should never be called, as .req goes between the alias and the
2767 register name, not at the beginning of the line. */
2768
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* A valid ".req" follows an alias name and is consumed by
     create_register_alias; this handler only runs when ".req" begins
     the statement, which is always an error.  */
  as_bad (_("invalid syntax for .req directive"));
}
2774
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  /* A valid ".dn" follows an alias name and is consumed by
     create_neon_reg_alias; reaching this handler is always an error.  */
  as_bad (_("invalid syntax for .dn directive"));
}
2780
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  /* A valid ".qn" follows an alias name and is consumed by
     create_neon_reg_alias; reaching this handler is always an error.  */
  as_bad (_("invalid syntax for .qn directive"));
}
2786
2787/* The .unreq directive deletes an alias which was previously defined
2788 by .req. For example:
2789
2790 my_alias .req r11
2791 .unreq my_alias */
2792
2793static void
2794s_unreq (int a ATTRIBUTE_UNUSED)
2795{
2796 char * name;
2797 char saved_char;
2798
2799 name = input_line_pointer;
2800
2801 while (*input_line_pointer != 0
2802 && *input_line_pointer != ' '
2803 && *input_line_pointer != '\n')
2804 ++input_line_pointer;
2805
2806 saved_char = *input_line_pointer;
2807 *input_line_pointer = 0;
2808
2809 if (!*name)
2810 as_bad (_("invalid syntax for .unreq directive"));
2811 else
2812 {
2813 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2814 name);
2815
2816 if (!reg)
2817 as_bad (_("unknown register alias '%s'"), name);
2818 else if (reg->builtin)
2819 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2820 name);
2821 else
2822 {
2823 char * p;
2824 char * nbuf;
2825
2826 hash_delete (arm_reg_hsh, name, FALSE);
2827 free ((char *) reg->name);
2828 if (reg->neon)
2829 free (reg->neon);
2830 free (reg);
2831
2832 /* Also locate the all upper case and all lower case versions.
2833 Do not complain if we cannot find one or the other as it
2834 was probably deleted above. */
2835
2836 nbuf = strdup (name);
2837 for (p = nbuf; *p; p++)
2838 *p = TOUPPER (*p);
2839 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2840 if (reg)
2841 {
2842 hash_delete (arm_reg_hsh, nbuf, FALSE);
2843 free ((char *) reg->name);
2844 if (reg->neon)
2845 free (reg->neon);
2846 free (reg);
2847 }
2848
2849 for (p = nbuf; *p; p++)
2850 *p = TOLOWER (*p);
2851 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2852 if (reg)
2853 {
2854 hash_delete (arm_reg_hsh, nbuf, FALSE);
2855 free ((char *) reg->name);
2856 if (reg->neon)
2857 free (reg->neon);
2858 free (reg);
2859 }
2860
2861 free (nbuf);
2862 }
2863 }
2864
2865 *input_line_pointer = saved_char;
2866 demand_empty_rest_of_line ();
2867}
2868
2869/* Directives: Instruction set selection. */
2870
2871#ifdef OBJ_ELF
2872/* This code is to handle mapping symbols as defined in the ARM ELF spec.
2873 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2874 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2875 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2876
2877/* Create a new mapping symbol for the transition to STATE. */
2878
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Choose the AAELF mapping-symbol name for the new state.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Tag $a/$t symbols with the matching ARM/Thumb attributes.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference. Also check that
     we do not place two mapping symbols at the same offset within a
     frag. We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive. In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Two symbols at the same offset: the newer one wins.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2952
2953/* We must sometimes convert a region marked as code to data during
2954 code alignment, if an odd number of bytes have to be padded. The
2955 code mapping symbol is pushed to an aligned address. */
2956
2957static void
2958insert_data_mapping_symbol (enum mstate state,
2959 valueT value, fragS *frag, offsetT bytes)
2960{
2961 /* If there was already a mapping symbol, remove it. */
2962 if (frag->tc_frag_data.last_map != NULL
2963 && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2964 {
2965 symbolS *symp = frag->tc_frag_data.last_map;
2966
2967 if (value == 0)
2968 {
2969 know (frag->tc_frag_data.first_map == symp);
2970 frag->tc_frag_data.first_map = NULL;
2971 }
2972 frag->tc_frag_data.last_map = NULL;
2973 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2974 }
2975
2976 make_mapping_symbol (MAP_DATA, value, frag);
2977 make_mapping_symbol (state, value + bytes, frag);
2978}
2979
2980static void mapping_state_2 (enum mstate state, int max_chars);
2981
2982/* Set the mapping state to STATE. Only call this when about to
2983 emit some STATE bytes to the file. */
2984
2985#define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  /* Current mapping state of the section being emitted into.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms. However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to be marked with sufficient
       alignment. Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  mapping_state_2 (state, 0);
}
3018
3019/* Same as mapping_state, but MAX_CHARS bytes have already been
3020 allocated. Put the mapping symbol that far back. */
3021
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Mapping symbols are only tracked in ordinary sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      /* Anything emitted before this first code symbol must have been
	 data: mark it with a $d at the start of the section.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  /* Place the mapping symbol MAX_CHARS bytes back, at the start of the
     bytes the caller has already allocated.  */
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
3048#undef TRANSITION
3049#else
3050#define mapping_state(x) ((void)0)
3051#define mapping_state_2(x, y) ((void)0)
3052#endif
3053
3054/* Find the real, Thumb encoded start of a Thumb function. */
3055
3056#ifdef OBJ_COFF
3057static symbolS *
3058find_real_start (symbolS * symbolP)
3059{
3060 char * real_start;
3061 const char * name = S_GET_NAME (symbolP);
3062 symbolS * new_target;
3063
3064 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
3065#define STUB_NAME ".real_start_of"
3066
3067 if (name == NULL)
3068 abort ();
3069
3070 /* The compiler may generate BL instructions to local labels because
3071 it needs to perform a branch to a far away location. These labels
3072 do not have a corresponding ".real_start_of" label. We check
3073 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
3074 the ".real_start_of" convention for nonlocal branches. */
3075 if (S_IS_LOCAL (symbolP) || name[0] == '.')
3076 return symbolP;
3077
3078 real_start = concat (STUB_NAME, name, NULL);
3079 new_target = symbol_find (real_start);
3080 free (real_start);
3081
3082 if (new_target == NULL)
3083 {
3084 as_warn (_("Failed to find real start of function: %s\n"), name);
3085 new_target = symbolP;
3086 }
3087
3088 return new_target;
3089}
3090#endif
3091
3092static void
3093opcode_select (int width)
3094{
3095 switch (width)
3096 {
3097 case 16:
3098 if (! thumb_mode)
3099 {
3100 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
3101 as_bad (_("selected processor does not support THUMB opcodes"));
3102
3103 thumb_mode = 1;
3104 /* No need to force the alignment, since we will have been
3105 coming from ARM mode, which is word-aligned. */
3106 record_alignment (now_seg, 1);
3107 }
3108 break;
3109
3110 case 32:
3111 if (thumb_mode)
3112 {
3113 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
3114 as_bad (_("selected processor does not support ARM opcodes"));
3115
3116 thumb_mode = 0;
3117
3118 if (!need_pass_2)
3119 frag_align (2, 0, 0);
3120
3121 record_alignment (now_seg, 1);
3122 }
3123 break;
3124
3125 default:
3126 as_bad (_("invalid instruction size selected (%d)"), width);
3127 }
3128}
3129
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  /* ".arm": switch to the 32-bit ARM instruction encoding.  */
  opcode_select (32);
  demand_empty_rest_of_line ();
}
3136
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* ".thumb": switch to the 16-bit Thumb instruction encoding.  */
  opcode_select (16);
  demand_empty_rest_of_line ();
}
3143
3144static void
3145s_code (int unused ATTRIBUTE_UNUSED)
3146{
3147 int temp;
3148
3149 temp = get_absolute_expression ();
3150 switch (temp)
3151 {
3152 case 16:
3153 case 32:
3154 opcode_select (temp);
3155 break;
3156
3157 default:
3158 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
3159 }
3160}
3161
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* NOTE(review): the value 2 (vs 1 set by opcode_select) appears
	 to mark the mode as forced -- confirm against the users of
	 thumb_mode.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
3178
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  /* Switch to Thumb encoding, exactly as ".thumb" would.  */
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
3188
3189/* Perform a .set directive, but also mark the alias as
3190 being a thumb function. */
3191
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Temporarily re-terminate the name for the error message.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    } /* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  /* When EQUIV is non-zero an already-defined symbol is an error.  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
3277
3278/* Directives: Mode selection. */
3279
3280/* .syntax [unified|divided] - choose the new unified syntax
3281 (same for Arm and Thumb encoding, modulo slight differences in what
3282 can be represented) or the old divergent syntax for each mode. */
3283static void
3284s_syntax (int unused ATTRIBUTE_UNUSED)
3285{
3286 char *name, delim;
3287
3288 delim = get_symbol_name (& name);
3289
3290 if (!strcasecmp (name, "unified"))
3291 unified_syntax = TRUE;
3292 else if (!strcasecmp (name, "divided"))
3293 unified_syntax = FALSE;
3294 else
3295 {
3296 as_bad (_("unrecognized syntax mode \"%s\""), name);
3297 return;
3298 }
3299 (void) restore_line_pointer (delim);
3300 demand_empty_rest_of_line ();
3301}
3302
3303/* Directives: sectioning and alignment. */
3304
/* Handle the .bss directive: switch output to the BSS section.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  /* Let the ELF backend update mapping-symbol state for the new section.  */
  md_elf_section_change_hook ();
#endif
}
3317
/* Handle the .even directive: align the current section to a 2-byte
   boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  /* Record the alignment so the section keeps it through relaxation.  */
  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3329
3330/* Directives: CodeComposer Studio. */
3331
3332/* .ref (for CodeComposer Studio syntax only). */
3333static void
3334s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3335{
3336 if (codecomposer_syntax)
3337 ignore_rest_of_line ();
3338 else
3339 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3340}
3341
3342/* If name is not NULL, then it is used for marking the beginning of a
3343 function, whereas if it is NULL then it means the function end. */
3344static void
3345asmfunc_debug (const char * name)
3346{
3347 static const char * last_name = NULL;
3348
3349 if (name != NULL)
3350 {
3351 gas_assert (last_name == NULL);
3352 last_name = name;
3353
3354 if (debug_type == DEBUG_STABS)
3355 stabs_generate_asm_func (name, name);
3356 }
3357 else
3358 {
3359 gas_assert (last_name != NULL);
3360
3361 if (debug_type == DEBUG_STABS)
3362 stabs_generate_asm_endfunc (last_name, last_name);
3363
3364 last_name = NULL;
3365 }
3366}
3367
3368static void
3369s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3370{
3371 if (codecomposer_syntax)
3372 {
3373 switch (asmfunc_state)
3374 {
3375 case OUTSIDE_ASMFUNC:
3376 asmfunc_state = WAITING_ASMFUNC_NAME;
3377 break;
3378
3379 case WAITING_ASMFUNC_NAME:
3380 as_bad (_(".asmfunc repeated."));
3381 break;
3382
3383 case WAITING_ENDASMFUNC:
3384 as_bad (_(".asmfunc without function."));
3385 break;
3386 }
3387 demand_empty_rest_of_line ();
3388 }
3389 else
3390 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3391}
3392
3393static void
3394s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3395{
3396 if (codecomposer_syntax)
3397 {
3398 switch (asmfunc_state)
3399 {
3400 case OUTSIDE_ASMFUNC:
3401 as_bad (_(".endasmfunc without a .asmfunc."));
3402 break;
3403
3404 case WAITING_ASMFUNC_NAME:
3405 as_bad (_(".endasmfunc without function."));
3406 break;
3407
3408 case WAITING_ENDASMFUNC:
3409 asmfunc_state = OUTSIDE_ASMFUNC;
3410 asmfunc_debug (NULL);
3411 break;
3412 }
3413 demand_empty_rest_of_line ();
3414 }
3415 else
3416 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3417}
3418
3419static void
3420s_ccs_def (int name)
3421{
3422 if (codecomposer_syntax)
3423 s_globl (name);
3424 else
3425 as_bad (_(".def pseudo-op only available with -mccs flag."));
3426}
3427
3428/* Directives: Literal pools. */
3429
3430static literal_pool *
3431find_literal_pool (void)
3432{
3433 literal_pool * pool;
3434
3435 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3436 {
3437 if (pool->section == now_seg
3438 && pool->sub_section == now_subseg)
3439 break;
3440 }
3441
3442 return pool;
3443}
3444
3445static literal_pool *
3446find_or_make_literal_pool (void)
3447{
3448 /* Next literal pool ID number. */
3449 static unsigned int latest_pool_num = 1;
3450 literal_pool * pool;
3451
3452 pool = find_literal_pool ();
3453
3454 if (pool == NULL)
3455 {
3456 /* Create a new pool. */
3457 pool = XNEW (literal_pool);
3458 if (! pool)
3459 return NULL;
3460
3461 pool->next_free_entry = 0;
3462 pool->section = now_seg;
3463 pool->sub_section = now_subseg;
3464 pool->next = list_of_pools;
3465 pool->symbol = NULL;
3466 pool->alignment = 2;
3467
3468 /* Add it to the list. */
3469 list_of_pools = pool;
3470 }
3471
3472 /* New pools, and emptied pools, will have a NULL symbol. */
3473 if (pool->symbol == NULL)
3474 {
3475 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3476 (valueT) 0, &zero_address_frag);
3477 pool->id = latest_pool_num ++;
3478 }
3479
3480 /* Done. */
3481 return pool;
3482}
3483
3484/* Add the literal in the global 'inst'
3485 structure to the relevant literal pool. */
3486
static int
add_to_lit_pool (unsigned int nbytes)
{
/* X_md flag marking an entry as an alignment padding slot.  */
#define PADDING_SLOT 0x1
/* Low byte of X_md holds the entry size in bytes.  */
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  if (nbytes == 8)
    {
      /* Split a 64-bit literal into two 32-bit halves; imm2 is the
	 high half, taken from .reg when the operand was a reg/imm
	 pair, otherwise sign-extended from the immediate.  */
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	      : inst.relocs[0].exp.X_unsigned ? 0
	      : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Match an existing 4-byte constant entry.  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.relocs[0].exp.X_unsigned))
	    break;

	  /* Match an existing symbolic entry (same symbol + addend).  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.relocs[0].exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.relocs[0].exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte literal occupies two consecutive 4-byte slots on an
	 8-byte-aligned offset; both halves must match.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.relocs[0].exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.relocs[0].exp.X_unsigned))
	break;

      /* A padding slot left by a previous 8-byte entry can be reused
	 for a 4-byte literal.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.relocs[0].exp.X_op == O_constant
		|| inst.relocs[0].exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Pool is not 8-byte aligned: insert a 4-byte padding
		 slot first, tagged so it can be reused later.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.relocs[0].exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two 32-bit halves as separate constant entries.  */
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  /* 8-byte literals force the whole pool to 8-byte alignment.  */
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Reuse the padding slot for this 4-byte literal.  */
      pool->literals[entry] = inst.relocs[0].exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's operand as pool-symbol + offset, to be
     fixed up when the pool is emitted by s_ltorg.  */
  inst.relocs[0].exp.X_op	= O_symbol;
  inst.relocs[0].exp.X_add_number = pool_size;
  inst.relocs[0].exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3648
3649bfd_boolean
3650tc_start_label_without_colon (void)
3651{
3652 bfd_boolean ret = TRUE;
3653
3654 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3655 {
3656 const char *label = input_line_pointer;
3657
3658 while (!is_end_of_line[(int) label[-1]])
3659 --label;
3660
3661 if (*label == '.')
3662 {
3663 as_bad (_("Invalid label '%s'"), label);
3664 ret = FALSE;
3665 }
3666
3667 asmfunc_debug (label);
3668
3669 asmfunc_state = WAITING_ENDASMFUNC;
3670 }
3671
3672 return ret;
3673}
3674
3675/* Can't use symbol_new here, so have to create a symbol and then at
3676 a later date assign it a value. That's what these functions do. */
3677
/* Fill in an already-created symbol: set its name (copied onto the
   notes obstack), segment, value and fragment, then append it to the
   global symbol chain and run the object/target new-symbol hooks.  */
static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	    valu,	/* Symbol value.  */
	       fragS *	    frag)	/* Associated fragment.  */
{
  size_t name_length;
  char * preserved_copy_of_name;

  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
3725
/* Handle the .ltorg directive: dump the current section's literal pool
   at the current location and mark the pool as emptied.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* Literal data needs a $d mapping symbol.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* \002 makes the name impossible to write in source, so it cannot
     clash with a user symbol.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Resolve the pool's fake symbol to this location; instructions that
     referenced pool entries now have a concrete base address.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3777
3778#ifdef OBJ_ELF
3779/* Forward declarations for functions below, in the MD interface
3780 section. */
3781static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3782static valueT create_unwind_entry (int);
3783static void start_unwind_section (const segT, int);
3784static void add_unwind_opcode (valueT, int);
3785static void flush_pending_unwind (void);
3786
3787/* Directives: Data. */
3788
/* Handle .word/.short style data directives, allowing each expression
   to carry an optional ARM relocation suffix such as (GOT).  NBYTES is
   the size of each emitted datum.  */
static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbolic expression may be followed by a relocation
	     suffix, e.g. "sym(GOT)".  */
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Save the text, splice the reloc suffix out of the
		     buffer, re-parse the whole expression, then restore
		     the original text.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Place the relocation at the least significant end
		     of the datum when it is narrower than NBYTES.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3883
3884/* Emit an expression containing a 32-bit thumb instruction.
3885 Implementation based on put_thumb32_insn. */
3886
3887static void
3888emit_thumb32_expr (expressionS * exp)
3889{
3890 expressionS exp_high = *exp;
3891
3892 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3893 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3894 exp->X_add_number &= 0xffff;
3895 emit_expr (exp, (unsigned int) THUMB_SIZE);
3896}
3897
3898/* Guess the instruction size based on the opcode. */
3899
static int
thumb_insn_size (int opcode)
{
  /* Opcodes below 0xe800 are 16-bit encodings; full 32-bit patterns
     start at 0xe8000000.  Values in between cannot be classified, so
     return 0 for "unknown size".  */
  unsigned int pattern = (unsigned int) opcode;

  if (pattern < 0xe800u)
    return 2;
  if (pattern >= 0xe8000000u)
    return 4;
  return 0;
}
3910
/* Emit one .inst operand EXP as an instruction of NBYTES bytes (0 means
   guess the size from the opcode in Thumb mode).  Returns TRUE when
   something was emitted, FALSE on error.  */
static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width suffix: infer 2 or 4 from the opcode value.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  /* A 16-bit emission cannot hold a value above 0xffff.  */
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT/predication state machine consistent with
		 this hand-written instruction.  */
	      if (now_pred.state == AUTOMATIC_PRED_BLOCK)
		set_pred_insn_type_nonvoid (OUTSIDE_PRED_INSN, 0);
	      else
		set_pred_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* 32-bit Thumb instructions need halfword-swapped output
		 on little-endian targets.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3955
3956/* Like s_arm_elf_cons but do not use md_cons_align and
3957 set the mapping state to MAP_ARM/MAP_THUMB. */
3958
static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      /* .inst.n / .inst.w make no sense for fixed-width ARM insns.  */
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  /* Emit each comma-separated operand as one instruction.  */
  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
4005
4006/* Parse a .rel31 directive. */
4007
static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  /* Syntax: .rel31 <0|1>, <expression>.  The first operand supplies
     bit 31 of the emitted word; the low 31 bits come from a PREL31
     relocation against the expression.  */
  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  /* pc-relative fixup; the linker resolves the low 31 bits.  */
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
4045
4046/* Directives: AEABI stack-unwind tables. */
4047
4048/* Parse an unwind_fnstart directive. Simply records the current location. */
4049
4050static void
4051s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
4052{
4053 demand_empty_rest_of_line ();
4054 if (unwind.proc_start)
4055 {
4056 as_bad (_("duplicate .fnstart directive"));
4057 return;
4058 }
4059
4060 /* Mark the start of the function. */
4061 unwind.proc_start = expr_build_dot ();
4062
4063 /* Reset the rest of the unwind info. */
4064 unwind.opcode_count = 0;
4065 unwind.table_entry = NULL;
4066 unwind.personality_routine = NULL;
4067 unwind.personality_index = -1;
4068 unwind.frame_size = 0;
4069 unwind.fp_offset = 0;
4070 unwind.fp_reg = REG_SP;
4071 unwind.fp_used = 0;
4072 unwind.sp_restored = 0;
4073}
4074
4075
4076/* Parse a handlerdata directive. Creates the exception handling table entry
4077 for the function. */
4078
4079static void
4080s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
4081{
4082 demand_empty_rest_of_line ();
4083 if (!unwind.proc_start)
4084 as_bad (MISSING_FNSTART);
4085
4086 if (unwind.table_entry)
4087 as_bad (_("duplicate .handlerdata directive"));
4088
4089 create_unwind_entry (1);
4090}
4091
4092/* Parse an unwind_fnend directive. Generates the index table entry. */
4093
static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      /* A zero-size R_ARM_NONE fixup pulls the routine in without
	 emitting any data.  */
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  /* The function is finished; allow a new .fnstart.  */
  unwind.proc_start = NULL;
}
4161
4162
4163/* Parse an unwind_cantunwind directive. */
4164
4165static void
4166s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
4167{
4168 demand_empty_rest_of_line ();
4169 if (!unwind.proc_start)
4170 as_bad (MISSING_FNSTART);
4171
4172 if (unwind.personality_routine || unwind.personality_index != -1)
4173 as_bad (_("personality routine specified for cantunwind frame"));
4174
4175 unwind.personality_index = -2;
4176}
4177
4178
4179/* Parse a personalityindex directive. */
4180
4181static void
4182s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
4183{
4184 expressionS exp;
4185
4186 if (!unwind.proc_start)
4187 as_bad (MISSING_FNSTART);
4188
4189 if (unwind.personality_routine || unwind.personality_index != -1)
4190 as_bad (_("duplicate .personalityindex directive"));
4191
4192 expression (&exp);
4193
4194 if (exp.X_op != O_constant
4195 || exp.X_add_number < 0 || exp.X_add_number > 15)
4196 {
4197 as_bad (_("bad personality routine number"));
4198 ignore_rest_of_line ();
4199 return;
4200 }
4201
4202 unwind.personality_index = exp.X_add_number;
4203
4204 demand_empty_rest_of_line ();
4205}
4206
4207
4208/* Parse a personality directive. */
4209
static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* get_symbol_name NUL-terminates the name in place; C is the
     displaced delimiter, restored below after the lookup.  */
  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    /* Skip the closing quote of a quoted symbol name.  */
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  *p = c;
  demand_empty_rest_of_line ();
}
4229
4230
4231/* Parse a directive saving core registers. */
4232
static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  /* RANGE is a bit mask of the core registers in the list.  */
  range = parse_reg_list (&input_line_pointer, REGLIST_RN);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      /* Replace ip (bit 12) by sp (bit 13) in the mask.  */
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4306
4307
4308/* Parse a directive saving FPA registers. */
4309
4310static void
4311s_arm_unwind_save_fpa (int reg)
4312{
4313 expressionS exp;
4314 int num_regs;
4315 valueT op;
4316
4317 /* Get Number of registers to transfer. */
4318 if (skip_past_comma (&input_line_pointer) != FAIL)
4319 expression (&exp);
4320 else
4321 exp.X_op = O_illegal;
4322
4323 if (exp.X_op != O_constant)
4324 {
4325 as_bad (_("expected , <constant>"));
4326 ignore_rest_of_line ();
4327 return;
4328 }
4329
4330 num_regs = exp.X_add_number;
4331
4332 if (num_regs < 1 || num_regs > 4)
4333 {
4334 as_bad (_("number of registers must be in the range [1:4]"));
4335 ignore_rest_of_line ();
4336 return;
4337 }
4338
4339 demand_empty_rest_of_line ();
4340
4341 if (reg == 4)
4342 {
4343 /* Short form. */
4344 op = 0xb4 | (num_regs - 1);
4345 add_unwind_opcode (op, 1);
4346 }
4347 else
4348 {
4349 /* Long form. */
4350 op = 0xc800 | (reg << 4) | (num_regs - 1);
4351 add_unwind_opcode (op, 2);
4352 }
4353 unwind.frame_size += num_regs * 12;
4354}
4355
4356
4357/* Parse a directive saving VFP registers for ARMv6 and above. */
4358
static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;
  bfd_boolean partial_match;

  /* START receives the first D register; COUNT the list length.  */
  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D,
			      &partial_match);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* Opcode 0xc8 encodes the base as an offset from d16.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register is 8 bytes.  */
  unwind.frame_size += count * 8;
}
4407
4408
4409/* Parse a directive saving VFP registers for pre-ARMv6. */
4410
4411static void
4412s_arm_unwind_save_vfp (void)
4413{
4414 int count;
4415 unsigned int reg;
4416 valueT op;
4417 bfd_boolean partial_match;
4418
4419 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D,
4420 &partial_match);
4421 if (count == FAIL)
4422 {
4423 as_bad (_("expected register list"));
4424 ignore_rest_of_line ();
4425 return;
4426 }
4427
4428 demand_empty_rest_of_line ();
4429
4430 if (reg == 8)
4431 {
4432 /* Short form. */
4433 op = 0xb8 | (count - 1);
4434 add_unwind_opcode (op, 1);
4435 }
4436 else
4437 {
4438 /* Long form. */
4439 op = 0xb300 | (reg << 4) | (count - 1);
4440 add_unwind_opcode (op, 2);
4441 }
4442 unwind.frame_size += count * 8 + 4;
4443}
4444
4445
/* Parse a directive saving iWMMXt data registers (wr0-wr15).
   Accepts a brace-optional, comma-separated list of registers and
   ranges, then emits EHABI unwind opcodes describing the save,
   merging with an immediately preceding wr-save opcode when the
   register blocks are contiguous.  */

static void
s_arm_unwind_save_mmxwr (void)
{
  int reg;
  int hi_reg;
  int i;
  unsigned mask = 0;	/* Bit N set <=> wrN is in the list.  */
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	  goto error;
	}

      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.  */
  flush_pending_unwind ();

  /* Each saved wr register occupies 8 bytes of stack.  */
  for (i = 0; i < 16; i++)
    {
      if (mask & (1 << i))
	unwind.frame_size += 8;
    }

  /* Attempt to combine with a previous opcode.  We do this because gcc
     likes to output separate unwind directives for a single block of
     registers.  */
  if (unwind.opcode_count > 0)
    {
      i = unwind.opcodes[unwind.opcode_count - 1];
      if ((i & 0xf8) == 0xc0)
	{
	  i &= 7;
	  /* Only merge if the blocks are contiguous.  */
	  if (i < 6)
	    {
	      /* The previous short-form opcode saved wr10..wr(10+i)
		 (see the emit loop below).  Merge when the new list's
		 top register is exactly wr9, i.e. adjacent to wr10.  */
	      if ((mask & 0xfe00) == (1 << 9))
		{
		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
		  unwind.opcode_count--;
		}
	    }
	  else if (i == 6 && unwind.opcode_count >= 2)
	    {
	      /* The previous save used the two-byte long form; decode
		 its start register (high nibble) and length-1 (low
		 nibble), then merge if that block starts right above
		 the top register of the new list.  */
	      i = unwind.opcodes[unwind.opcode_count - 2];
	      reg = i >> 4;
	      i &= 0xf;

	      op = 0xffff << (reg - 1);
	      if (reg > 0
		  && ((mask & op) == (1u << (reg - 1))))
		{
		  op = (1 << (reg + i + 1)) - 1;
		  op &= ~((1 << reg) - 1);
		  mask |= op;
		  unwind.opcode_count -= 2;
		}
	    }
	}
    }

  hi_reg = 15;
  /* We want to generate opcodes in the order the registers have been
     saved, ie. descending order.  */
  for (reg = 15; reg >= -1; reg--)
    {
      /* Save registers in blocks.  reg == -1 flushes any block that
	 reaches down to wr0.  */
      if (reg < 0
	  || !(mask & (1 << reg)))
	{
	  /* We found an unsaved reg.  Generate opcodes to save the
	     preceding block.  */
	  if (reg != hi_reg)
	    {
	      if (reg == 9)
		{
		  /* Short form: saves wr10..hi_reg.  */
		  op = 0xc0 | (hi_reg - 10);
		  add_unwind_opcode (op, 1);
		}
	      else
		{
		  /* Long form: saves wr(reg+1)..hi_reg.  */
		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
		  add_unwind_opcode (op, 2);
		}
	    }
	  hi_reg = reg - 1;
	}
    }

  return;
error:
  ignore_rest_of_line ();
}
4579
4580static void
4581s_arm_unwind_save_mmxwcg (void)
4582{
4583 int reg;
4584 int hi_reg;
4585 unsigned mask = 0;
4586 valueT op;
4587
4588 if (*input_line_pointer == '{')
4589 input_line_pointer++;
4590
4591 skip_whitespace (input_line_pointer);
4592
4593 do
4594 {
4595 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4596
4597 if (reg == FAIL)
4598 {
4599 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4600 goto error;
4601 }
4602
4603 reg -= 8;
4604 if (mask >> reg)
4605 as_tsktsk (_("register list not in ascending order"));
4606 mask |= 1 << reg;
4607
4608 if (*input_line_pointer == '-')
4609 {
4610 input_line_pointer++;
4611 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4612 if (hi_reg == FAIL)
4613 {
4614 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4615 goto error;
4616 }
4617 else if (reg >= hi_reg)
4618 {
4619 as_bad (_("bad register range"));
4620 goto error;
4621 }
4622 for (; reg < hi_reg; reg++)
4623 mask |= 1 << reg;
4624 }
4625 }
4626 while (skip_past_comma (&input_line_pointer) != FAIL);
4627
4628 skip_past_char (&input_line_pointer, '}');
4629
4630 demand_empty_rest_of_line ();
4631
4632 /* Generate any deferred opcodes because we're going to be looking at
4633 the list. */
4634 flush_pending_unwind ();
4635
4636 for (reg = 0; reg < 16; reg++)
4637 {
4638 if (mask & (1 << reg))
4639 unwind.frame_size += 4;
4640 }
4641 op = 0xc700 | mask;
4642 add_unwind_opcode (op, 2);
4643 return;
4644error:
4645 ignore_rest_of_line ();
4646}
4647
4648
4649/* Parse an unwind_save directive.
4650 If the argument is non-zero, this is a .vsave directive. */
4651
4652static void
4653s_arm_unwind_save (int arch_v6)
4654{
4655 char *peek;
4656 struct reg_entry *reg;
4657 bfd_boolean had_brace = FALSE;
4658
4659 if (!unwind.proc_start)
4660 as_bad (MISSING_FNSTART);
4661
4662 /* Figure out what sort of save we have. */
4663 peek = input_line_pointer;
4664
4665 if (*peek == '{')
4666 {
4667 had_brace = TRUE;
4668 peek++;
4669 }
4670
4671 reg = arm_reg_parse_multi (&peek);
4672
4673 if (!reg)
4674 {
4675 as_bad (_("register expected"));
4676 ignore_rest_of_line ();
4677 return;
4678 }
4679
4680 switch (reg->type)
4681 {
4682 case REG_TYPE_FN:
4683 if (had_brace)
4684 {
4685 as_bad (_("FPA .unwind_save does not take a register list"));
4686 ignore_rest_of_line ();
4687 return;
4688 }
4689 input_line_pointer = peek;
4690 s_arm_unwind_save_fpa (reg->number);
4691 return;
4692
4693 case REG_TYPE_RN:
4694 s_arm_unwind_save_core ();
4695 return;
4696
4697 case REG_TYPE_VFD:
4698 if (arch_v6)
4699 s_arm_unwind_save_vfp_armv6 ();
4700 else
4701 s_arm_unwind_save_vfp ();
4702 return;
4703
4704 case REG_TYPE_MMXWR:
4705 s_arm_unwind_save_mmxwr ();
4706 return;
4707
4708 case REG_TYPE_MMXWCG:
4709 s_arm_unwind_save_mmxwcg ();
4710 return;
4711
4712 default:
4713 as_bad (_(".unwind_save does not support this kind of register"));
4714 ignore_rest_of_line ();
4715 }
4716}
4717
4718
4719/* Parse an unwind_movsp directive. */
4720
4721static void
4722s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4723{
4724 int reg;
4725 valueT op;
4726 int offset;
4727
4728 if (!unwind.proc_start)
4729 as_bad (MISSING_FNSTART);
4730
4731 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4732 if (reg == FAIL)
4733 {
4734 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4735 ignore_rest_of_line ();
4736 return;
4737 }
4738
4739 /* Optional constant. */
4740 if (skip_past_comma (&input_line_pointer) != FAIL)
4741 {
4742 if (immediate_for_directive (&offset) == FAIL)
4743 return;
4744 }
4745 else
4746 offset = 0;
4747
4748 demand_empty_rest_of_line ();
4749
4750 if (reg == REG_SP || reg == REG_PC)
4751 {
4752 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4753 return;
4754 }
4755
4756 if (unwind.fp_reg != REG_SP)
4757 as_bad (_("unexpected .unwind_movsp directive"));
4758
4759 /* Generate opcode to restore the value. */
4760 op = 0x90 | reg;
4761 add_unwind_opcode (op, 1);
4762
4763 /* Record the information for later. */
4764 unwind.fp_reg = reg;
4765 unwind.fp_offset = unwind.frame_size - offset;
4766 unwind.sp_restored = 1;
4767}
4768
4769/* Parse an unwind_pad directive. */
4770
4771static void
4772s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4773{
4774 int offset;
4775
4776 if (!unwind.proc_start)
4777 as_bad (MISSING_FNSTART);
4778
4779 if (immediate_for_directive (&offset) == FAIL)
4780 return;
4781
4782 if (offset & 3)
4783 {
4784 as_bad (_("stack increment must be multiple of 4"));
4785 ignore_rest_of_line ();
4786 return;
4787 }
4788
4789 /* Don't generate any opcodes, just record the details for later. */
4790 unwind.frame_size += offset;
4791 unwind.pending_offset += offset;
4792
4793 demand_empty_rest_of_line ();
4794}
4795
4796/* Parse an unwind_setfp directive. */
4797
4798static void
4799s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4800{
4801 int sp_reg;
4802 int fp_reg;
4803 int offset;
4804
4805 if (!unwind.proc_start)
4806 as_bad (MISSING_FNSTART);
4807
4808 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4809 if (skip_past_comma (&input_line_pointer) == FAIL)
4810 sp_reg = FAIL;
4811 else
4812 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4813
4814 if (fp_reg == FAIL || sp_reg == FAIL)
4815 {
4816 as_bad (_("expected <reg>, <reg>"));
4817 ignore_rest_of_line ();
4818 return;
4819 }
4820
4821 /* Optional constant. */
4822 if (skip_past_comma (&input_line_pointer) != FAIL)
4823 {
4824 if (immediate_for_directive (&offset) == FAIL)
4825 return;
4826 }
4827 else
4828 offset = 0;
4829
4830 demand_empty_rest_of_line ();
4831
4832 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4833 {
4834 as_bad (_("register must be either sp or set by a previous"
4835 "unwind_movsp directive"));
4836 return;
4837 }
4838
4839 /* Don't generate any opcodes, just record the information for later. */
4840 unwind.fp_reg = fp_reg;
4841 unwind.fp_used = 1;
4842 if (sp_reg == REG_SP)
4843 unwind.fp_offset = unwind.frame_size - offset;
4844 else
4845 unwind.fp_offset -= offset;
4846}
4847
4848/* Parse an unwind_raw directive. */
4849
4850static void
4851s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4852{
4853 expressionS exp;
4854 /* This is an arbitrary limit. */
4855 unsigned char op[16];
4856 int count;
4857
4858 if (!unwind.proc_start)
4859 as_bad (MISSING_FNSTART);
4860
4861 expression (&exp);
4862 if (exp.X_op == O_constant
4863 && skip_past_comma (&input_line_pointer) != FAIL)
4864 {
4865 unwind.frame_size += exp.X_add_number;
4866 expression (&exp);
4867 }
4868 else
4869 exp.X_op = O_illegal;
4870
4871 if (exp.X_op != O_constant)
4872 {
4873 as_bad (_("expected <offset>, <opcode>"));
4874 ignore_rest_of_line ();
4875 return;
4876 }
4877
4878 count = 0;
4879
4880 /* Parse the opcode. */
4881 for (;;)
4882 {
4883 if (count >= 16)
4884 {
4885 as_bad (_("unwind opcode too long"));
4886 ignore_rest_of_line ();
4887 }
4888 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4889 {
4890 as_bad (_("invalid unwind opcode"));
4891 ignore_rest_of_line ();
4892 return;
4893 }
4894 op[count++] = exp.X_add_number;
4895
4896 /* Parse the next byte. */
4897 if (skip_past_comma (&input_line_pointer) == FAIL)
4898 break;
4899
4900 expression (&exp);
4901 }
4902
4903 /* Add the opcode bytes in reverse order. */
4904 while (count--)
4905 add_unwind_opcode (op[count], 1);
4906
4907 demand_empty_rest_of_line ();
4908}
4909
4910
4911/* Parse a .eabi_attribute directive. */
4912
4913static void
4914s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4915{
4916 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4917
4918 if (tag >= 0 && tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4919 attributes_set_explicitly[tag] = 1;
4920}
4921
/* Emit a tls fix for the symbol.  Attaches a TLS_DESCSEQ relocation
   at the current output position; no bytes are emitted by this
   directive itself.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Pick the Thumb or ARM flavour of the relocation according to the
     current instruction-set state.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4945#endif /* OBJ_ELF */
4946
4947static void s_arm_arch (int);
4948static void s_arm_object_arch (int);
4949static void s_arm_cpu (int);
4950static void s_arm_fpu (int);
4951static void s_arm_arch_extension (int);
4952
4953#ifdef TE_PE
4954
4955static void
4956pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4957{
4958 expressionS exp;
4959
4960 do
4961 {
4962 expression (&exp);
4963 if (exp.X_op == O_symbol)
4964 exp.X_op = O_secrel;
4965
4966 emit_expr (&exp, 4);
4967 }
4968 while (*input_line_pointer++ == ',');
4969
4970 input_line_pointer--;
4971 demand_empty_rest_of_line ();
4972}
4973#endif /* TE_PE */
4974
4975int
4976arm_is_largest_exponent_ok (int precision)
4977{
4978 /* precision == 1 ensures that this will only return
4979 true for 16 bit floats. */
4980 return (precision == 1) && (fp16_format == ARM_FP16_FORMAT_ALTERNATIVE);
4981}
4982
4983static void
4984set_fp16_format (int dummy ATTRIBUTE_UNUSED)
4985{
4986 char saved_char;
4987 char* name;
4988 enum fp_16bit_format new_format;
4989
4990 new_format = ARM_FP16_FORMAT_DEFAULT;
4991
4992 name = input_line_pointer;
4993 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
4994 input_line_pointer++;
4995
4996 saved_char = *input_line_pointer;
4997 *input_line_pointer = 0;
4998
4999 if (strcasecmp (name, "ieee") == 0)
5000 new_format = ARM_FP16_FORMAT_IEEE;
5001 else if (strcasecmp (name, "alternative") == 0)
5002 new_format = ARM_FP16_FORMAT_ALTERNATIVE;
5003 else
5004 {
5005 as_bad (_("unrecognised float16 format \"%s\""), name);
5006 goto cleanup;
5007 }
5008
5009 /* Only set fp16_format if it is still the default (aka not already
5010 been set yet). */
5011 if (fp16_format == ARM_FP16_FORMAT_DEFAULT)
5012 fp16_format = new_format;
5013 else
5014 {
5015 if (new_format != fp16_format)
5016 as_warn (_("float16 format cannot be set more than once, ignoring."));
5017 }
5018
5019cleanup:
5020 *input_line_pointer = saved_char;
5021 ignore_rest_of_line ();
5022}
5023
5024/* This table describes all the machine specific pseudo-ops the assembler
5025 has to support. The fields are:
5026 pseudo-op name without dot
5027 function to call to execute this pseudo-op
5028 Integer arg to pass to the function. */
5029
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req", s_req, 0 },
  /* Following two are likewise never called.  */
  { "dn", s_dn, 0 },
  { "qn", s_qn, 0 },
  { "unreq", s_unreq, 0 },
  { "bss", s_bss, 0 },
  { "align", s_align_ptwo, 2 },
  { "arm", s_arm, 0 },
  { "thumb", s_thumb, 0 },
  { "code", s_code, 0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func", s_thumb_func, 0 },
  { "thumb_set", s_thumb_set, 0 },
  { "even", s_even, 0 },
  { "ltorg", s_ltorg, 0 },
  { "pool", s_ltorg, 0 },
  { "syntax", s_syntax, 0 },
  { "cpu", s_arm_cpu, 0 },
  { "arch", s_arm_arch, 0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu", s_arm_fpu, 0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word", s_arm_elf_cons, 4 },
  { "long", s_arm_elf_cons, 4 },
  { "inst.n", s_arm_elf_inst, 2 },
  { "inst.w", s_arm_elf_inst, 4 },
  { "inst", s_arm_elf_inst, 0 },
  { "rel31", s_arm_rel31, 0 },
  /* EHABI unwind table directives.  */
  { "fnstart", s_arm_unwind_fnstart, 0 },
  { "fnend", s_arm_unwind_fnend, 0 },
  { "cantunwind", s_arm_unwind_cantunwind, 0 },
  { "personality", s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata", s_arm_unwind_handlerdata, 0 },
  { "save", s_arm_unwind_save, 0 },
  { "vsave", s_arm_unwind_save, 1 },
  { "movsp", s_arm_unwind_movsp, 0 },
  { "pad", s_arm_unwind_pad, 0 },
  { "setfp", s_arm_unwind_setfp, 0 },
  { "unwind_raw", s_arm_unwind_raw, 0 },
  { "eabi_attribute", s_arm_eabi_attribute, 0 },
  { "tlsdescseq", s_arm_tls_descseq, 0 },
#else
  { "word", cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", dwarf2_directive_file, 0 },
  { "loc", dwarf2_directive_loc, 0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend", float_cons, 'x' },
  { "ldouble", float_cons, 'x' },
  { "packed", float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref", s_ccs_ref, 0},
  {"def", s_ccs_def, 0},
  {"asmfunc", s_ccs_asmfunc, 0},
  {"endasmfunc", s_ccs_endasmfunc, 0},

  /* 16-bit float support; the format directive must precede any use.  */
  {"float16", float_cons, 'h' },
  {"float16_format", set_fp16_format, 0 },

  { 0, 0, 0 }
};
5106
5107/* Parser functions used exclusively in instruction operands. */
5108
5109/* Generic immediate-value read function for use in insn parsing.
5110 STR points to the beginning of the immediate (the leading #);
5111 VAL receives the value; if the value is outside [MIN, MAX]
5112 issue an error. PREFIX_OPT is true if the immediate prefix is
5113 optional. */
5114
5115static int
5116parse_immediate (char **str, int *val, int min, int max,
5117 bfd_boolean prefix_opt)
5118{
5119 expressionS exp;
5120
5121 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
5122 if (exp.X_op != O_constant)
5123 {
5124 inst.error = _("constant expression required");
5125 return FAIL;
5126 }
5127
5128 if (exp.X_add_number < min || exp.X_add_number > max)
5129 {
5130 inst.error = _("immediate value out of range");
5131 return FAIL;
5132 }
5133
5134 *val = exp.X_add_number;
5135 return SUCCESS;
5136}
5137
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low 32
   bits in .imm and, for a 64-bit value, the high 32 bits in .reg with
   .regisimm set.  Returns SUCCESS or FAIL; on SUCCESS *STR is advanced
   past the immediate.  */

static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  /* Parse into the caller's expression if one was supplied.  */
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* For O_big, X_add_number is the littlenum count.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Low 32 bits into .imm, next 32 bits into .reg.  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
5210
/* Returns the pseudo-register number of an FPA immediate constant
   (i + 8 for the i'th entry of the fp_const/fp_values tables),
   or FAIL if there isn't a valid constant here.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char * save_in;
  expressionS exp;
  int i;
  int j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Trailing junk: undo the consume and keep trying.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA value.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* Restore the global parse position before failing.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
5303
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000, 0 otherwise.  IMM is an
   IEEE single-precision bit pattern.  */

static int
is_quarter_float (unsigned imm)
{
  unsigned expected_high;

  /* The low 19 bits (the tail of the mantissa) must all be clear.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* Bits 25-30 must be 0b011111 when bit 29 is set, 0b100000
     otherwise.  */
  expected_high = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return ((imm & 0x7e000000) ^ expected_high) == 0;
}
5313
5314
/* Detect the presence of a floating point or integer zero constant,
   i.e. #0.0 or #0.  Returns TRUE and advances *IN past the constant
   on a match; returns FALSE otherwise.  */

static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    {
      /* In unified syntax, all prefixes are optional.  */
      if (!unified_syntax)
	return FALSE;
    }
  else
    ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      int val;
      /* Range [0, 0]: only the literal value zero is accepted.  */
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
	return FALSE;
      return TRUE;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
			     &generic_floating_point_number);

  /* NOTE(review): low > leader appears to identify a zero mantissa
     (the leader pointer sits below low when no significant digits
     were produced), so this accepts positive zero only -- confirm
     against atof_generic's contract.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
	  > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
5352
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.  On SUCCESS, stores the 32-bit single
   precision bit pattern in *IMMED and advances *CCP.  */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan up to the next whitespace/newline looking for a character
	 that can only appear in a float literal.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept quarter-precision patterns, plus +/-0.0 (sign bit
	 masked off by 0x7fffffff).  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5416
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX, SHIFT_UXTW
};

/* Maps a shift mnemonic to its kind; looked up via the arm_shift_hsh
   hash table in parse_shift.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
  SHIFT_UXTW_IMMEDIATE		/* Shift must be UXTW immediate.  */
};
5439
5440/* Parse a <shift> specifier on an ARM data processing instruction.
5441 This has three forms:
5442
5443 (LSL|LSR|ASL|ASR|ROR) Rs
5444 (LSL|LSR|ASL|ASR|ROR) #imm
5445 RRX
5446
5447 Note that ASL is assimilated to LSL in the instruction encoding, and
5448 RRX to ROR #0 (which cannot be written as such). */
5449
5450static int
5451parse_shift (char **str, int i, enum parse_shift_mode mode)
5452{
5453 const struct asm_shift_name *shift_name;
5454 enum shift_kind shift;
5455 char *s = *str;
5456 char *p = s;
5457 int reg;
5458
5459 for (p = *str; ISALPHA (*p); p++)
5460 ;
5461
5462 if (p == *str)
5463 {
5464 inst.error = _("shift expression expected");
5465 return FAIL;
5466 }
5467
5468 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5469 p - *str);
5470
5471 if (shift_name == NULL)
5472 {
5473 inst.error = _("shift expression expected");
5474 return FAIL;
5475 }
5476
5477 shift = shift_name->kind;
5478
5479 switch (mode)
5480 {
5481 case NO_SHIFT_RESTRICT:
5482 case SHIFT_IMMEDIATE:
5483 if (shift == SHIFT_UXTW)
5484 {
5485 inst.error = _("'UXTW' not allowed here");
5486 return FAIL;
5487 }
5488 break;
5489
5490 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5491 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5492 {
5493 inst.error = _("'LSL' or 'ASR' required");
5494 return FAIL;
5495 }
5496 break;
5497
5498 case SHIFT_LSL_IMMEDIATE:
5499 if (shift != SHIFT_LSL)
5500 {
5501 inst.error = _("'LSL' required");
5502 return FAIL;
5503 }
5504 break;
5505
5506 case SHIFT_ASR_IMMEDIATE:
5507 if (shift != SHIFT_ASR)
5508 {
5509 inst.error = _("'ASR' required");
5510 return FAIL;
5511 }
5512 break;
5513 case SHIFT_UXTW_IMMEDIATE:
5514 if (shift != SHIFT_UXTW)
5515 {
5516 inst.error = _("'UXTW' required");
5517 return FAIL;
5518 }
5519 break;
5520
5521 default: abort ();
5522 }
5523
5524 if (shift != SHIFT_RRX)
5525 {
5526 /* Whitespace can appear here if the next thing is a bare digit. */
5527 skip_whitespace (p);
5528
5529 if (mode == NO_SHIFT_RESTRICT
5530 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5531 {
5532 inst.operands[i].imm = reg;
5533 inst.operands[i].immisreg = 1;
5534 }
5535 else if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
5536 return FAIL;
5537 }
5538 inst.operands[i].shift_kind = shift;
5539 inst.operands[i].shifted = 1;
5540 *str = p;
5541 return SUCCESS;
5542}
5543
/* Parse a <shifter_operand> for an ARM data processing instruction:

   #<immediate>
   #<immediate>, <rotate>
   <Rm>
   <Rm>, <shift>

   where <shift> is defined by parse_shift above, and <rotate> is a
   multiple of 2 between 0 and 30.  Validation of immediate operands
   is deferred to md_apply_fix.  Returns SUCCESS or FAIL with
   inst.error set.  */

static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  /* Register form: <Rm>[, <shift>].  */
  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  if (my_get_expression (&inst.relocs[0].exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.relocs[0].exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even amount in [0, 30].  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      /* The base constant must fit in 8 bits before rotation.  */
      if (inst.relocs[0].exp.X_add_number < 0
	  || inst.relocs[0].exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  */
      inst.operands[i].imm = inst.relocs[0].exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* Plain #<immediate>: leave validation/encoding to md_apply_fix.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 0;
  return SUCCESS;
}
5614
5615/* Group relocation information. Each entry in the table contains the
5616 textual name of the relocation as may appear in assembler source
5617 and must end with a colon.
5618 Along with this textual name are the relocation codes to be used if
5619 the corresponding instruction is an ALU instruction (ADD or SUB only),
5620 an LDR, an LDRS, or an LDC. */
5621
struct group_reloc_table_entry
{
  const char *name;	/* Textual name, without the trailing colon.  */
  int alu_code;		/* Reloc for ADD/SUB; 0 when unavailable.  */
  int ldr_code;		/* Reloc for LDR; 0 when unavailable.  */
  int ldrs_code;	/* Reloc for LDRS; 0 when unavailable.  */
  int ldc_code;		/* Reloc for LDC; 0 when unavailable.  */
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC,
  GROUP_MVE
} group_reloc_type;
5640
/* Each entry: name, then the ALU, LDR, LDRS and LDC relocation codes
   (0 marks a combination that is not supported).  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5715
5716/* Given the address of a pointer pointing to the textual name of a group
5717 relocation as may appear in assembler source, attempt to find its details
5718 in group_reloc_table. The pointer will be updated to the character after
5719 the trailing colon. On failure, FAIL will be returned; SUCCESS
5720 otherwise. On success, *entry will be updated to point at the relevant
5721 group_reloc_table entry. */
5722
5723static int
5724find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5725{
5726 unsigned int i;
5727 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5728 {
5729 int length = strlen (group_reloc_table[i].name);
5730
5731 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5732 && (*str)[length] == ':')
5733 {
5734 *out = &group_reloc_table[i];
5735 *str += (length + 1);
5736 return SUCCESS;
5737 }
5738 }
5739
5740 return FAIL;
5741}
5742
5743/* Parse a <shifter_operand> for an ARM data processing instruction
5744 (as for parse_shifter_operand) where group relocations are allowed:
5745
5746 #<immediate>
5747 #<immediate>, <rotate>
5748 #:<group_reloc>:<expression>
5749 <Rm>
5750 <Rm>, <shift>
5751
5752 where <group_reloc> is one of the strings defined in group_reloc_table.
5753 The hashes are optional.
5754
5755 Everything else is as for parse_shifter_operand. */
5756
static parse_operand_result
parse_shifter_operand_group_reloc (char **str, int i)
{
  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a group relocation.
     If we don't, punt the whole lot to parse_shifter_operand.  */

  if (((*str)[0] == '#' && (*str)[1] == ':')
      || (*str)[0] == ':')
    {
      struct group_reloc_table_entry *entry;

      /* Skip over the '#:' or ':' prefix.  */
      if ((*str)[0] == '#')
	(*str) += 2;
      else
	(*str)++;

      /* Try to parse a group relocation.  Anything else is an error.  */
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
	{
	  inst.error = _("unknown group relocation");
	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
	}

      /* We now have the group relocation table entry corresponding to
	 the name in the assembler source.  Next, we parse the expression.  */
      if (my_get_expression (&inst.relocs[0].exp, str, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL_NO_BACKTRACK;

      /* Record the relocation type (always the ALU variant here).  */
      inst.relocs[0].type = (bfd_reloc_code_real_type) entry->alu_code;
      gas_assert (inst.relocs[0].type != 0);

      return PARSE_OPERAND_SUCCESS;
    }
  else
    return parse_shifter_operand (str, i) == SUCCESS
	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;

  /* Never reached.  */
}
5798
5799/* Parse a Neon alignment expression. Information is written to
5800 inst.operands[i]. We assume the initial ':' has been skipped.
5801
5802 align .imm = align << 8, .immisalign=1, .preind=0 */
5803static parse_operand_result
5804parse_neon_alignment (char **str, int i)
5805{
5806 char *p = *str;
5807 expressionS exp;
5808
5809 my_get_expression (&exp, &p, GE_NO_PREFIX);
5810
5811 if (exp.X_op != O_constant)
5812 {
5813 inst.error = _("alignment must be constant");
5814 return PARSE_OPERAND_FAIL;
5815 }
5816
5817 inst.operands[i].imm = exp.X_add_number << 8;
5818 inst.operands[i].immisalign = 1;
5819 /* Alignments are not pre-indexes. */
5820 inst.operands[i].preind = 0;
5821
5822 *str = p;
5823 return PARSE_OPERAND_SUCCESS;
5824}
5825
5826/* Parse all forms of an ARM address expression. Information is written
5827 to inst.operands[i] and/or inst.relocs[0].
5828
5829 Preindexed addressing (.preind=1):
5830
5831 [Rn, #offset] .reg=Rn .relocs[0].exp=offset
5832 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5833 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5834 .shift_kind=shift .relocs[0].exp=shift_imm
5835
5836 These three may have a trailing ! which causes .writeback to be set also.
5837
5838 Postindexed addressing (.postind=1, .writeback=1):
5839
5840 [Rn], #offset .reg=Rn .relocs[0].exp=offset
5841 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5842 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5843 .shift_kind=shift .relocs[0].exp=shift_imm
5844
5845 Unindexed addressing (.preind=0, .postind=0):
5846
5847 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5848
5849 Other:
5850
5851 [Rn]{!} shorthand for [Rn,#0]{!}
5852 =immediate .isreg=0 .relocs[0].exp=immediate
5853 label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5854
5855 It is the caller's responsibility to check for addressing modes not
5856 supported by the instruction, and to set inst.relocs[0].type. */
5857
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.relocs[0].pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.relocs[0].exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      /* '=immediate' pseudo-load: record the immediate expression.  */
      else if (parse_big_immediate (&p, i, &inst.relocs[0].exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* For MVE, the base may also be an MVE vector (MQ) register.  */
  if (group_type == GROUP_MVE)
    {
      enum arm_reg_type rtype = REG_TYPE_MQ;
      struct neon_type_el et;
      if ((reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	{
	  inst.operands[i].isquad = 1;
	}
      else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
	{
	  inst.error = BAD_ADDR_MODE;
	  return PARSE_OPERAND_FAIL;
	}
    }
  else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      if (group_type == GROUP_MVE)
	inst.error = BAD_ADDR_MODE;
      else
	inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      /* Optional sign on the offset.  */
      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      enum arm_reg_type rtype = REG_TYPE_MQ;
      struct neon_type_el et;
      if (group_type == GROUP_MVE
	  && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	{
	  /* immisreg == 2 marks an MVE vector offset register.  */
	  inst.operands[i].immisreg = 2;
	  inst.operands[i].imm = reg;

	  if (skip_past_comma (&p) == SUCCESS)
	    {
	      if (parse_shift (&p, i, SHIFT_UXTW_IMMEDIATE) == SUCCESS)
		{
		  /* Fold the shift amount into .imm (bits 5 and up) and
		     clear the reloc expression so it is not reused.  */
		  inst.operands[i].imm |= inst.relocs[0].exp.X_add_number << 5;
		  inst.relocs[0].exp.X_add_number = 0;
		}
	      else
		return PARSE_OPERAND_FAIL;
	    }
	}
      else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here.  This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  if (inst.operands[i].negative)
	    {
	      /* Back up over the '-' so the expression parser sees it.  */
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		case GROUP_LDR:
		  inst.relocs[0].type
		    = (bfd_reloc_code_real_type) entry->ldr_code;
		  break;

		case GROUP_LDRS:
		  inst.relocs[0].type
		    = (bfd_reloc_code_real_type) entry->ldrs_code;
		  break;

		case GROUP_LDC:
		  inst.relocs[0].type
		    = (bfd_reloc_code_real_type) entry->ldc_code;
		  break;

		default:
		  gas_assert (0);
		}

	      /* A zero table entry means this group relocation flavour is
		 unsupported for this instruction class.  */
	      if (inst.relocs[0].type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;

	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here.  This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  /* Optional sign on the post-index offset.  */
	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  enum arm_reg_type rtype = REG_TYPE_MQ;
	  struct neon_type_el et;
	  if (group_type == GROUP_MVE
	      && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	    {
	      /* immisreg == 2 marks an MVE vector offset register.  */
	      inst.operands[i].immisreg = 2;
	      inst.operands[i].imm = reg;
	    }
	  else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already.  If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;

	      if (inst.operands[i].negative)
		{
		  /* Back up over the '-' so the expression parser sees it.  */
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
6158
6159static int
6160parse_address (char **str, int i)
6161{
6162 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
6163 ? SUCCESS : FAIL;
6164}
6165
6166static parse_operand_result
6167parse_address_group_reloc (char **str, int i, group_reloc_type type)
6168{
6169 return parse_address_main (str, i, 1, type);
6170}
6171
6172/* Parse an operand for a MOVW or MOVT instruction. */
static int
parse_half (char **str)
{
  char * p;

  p = *str;
  /* The leading '#' is optional; the result of the skip is ignored.  */
  skip_past_char (&p, '#');
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.relocs[0].type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.relocs[0].type = BFD_RELOC_ARM_MOVT;

  if (inst.relocs[0].type != BFD_RELOC_UNUSED)
    {
      /* Skip the 9-character ":lower16:"/":upper16:" operator.  */
      p += 9;
      skip_whitespace (p);
    }

  if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
    return FAIL;

  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      /* Without a relocation operator the operand must be a constant
	 that fits in 16 bits.  */
      if (inst.relocs[0].exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.relocs[0].exp.X_add_number < 0
	  || inst.relocs[0].exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
6211
6212/* Miscellaneous. */
6213
6214/* Parse a PSR flag operand. The value returned is FAIL on syntax error,
6215 or a bitmask suitable to be or-ed into the ARM msr instruction. */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile: gather the register name and look it up in the
	 v7M PSR hash table.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the 4-character register name matched above.  */
  p += 4;
check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* Bit 0x20 (and 0x2 for g_bit) records a flag given more than
	     once; such duplicates are diagnosed below.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated flags, partial nzcvq sets, and a
	     duplicated 'g'.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6410
6411static int
6412parse_sys_vldr_vstr (char **str)
6413{
6414 unsigned i;
6415 int val = FAIL;
6416 struct {
6417 const char *name;
6418 int regl;
6419 int regh;
6420 } sysregs[] = {
6421 {"FPSCR", 0x1, 0x0},
6422 {"FPSCR_nzcvqc", 0x2, 0x0},
6423 {"VPR", 0x4, 0x1},
6424 {"P0", 0x5, 0x1},
6425 {"FPCXTNS", 0x6, 0x1},
6426 {"FPCXTS", 0x7, 0x1}
6427 };
6428 char *op_end = strchr (*str, ',');
6429 size_t op_strlen = op_end - *str;
6430
6431 for (i = 0; i < sizeof (sysregs) / sizeof (sysregs[0]); i++)
6432 {
6433 if (!strncmp (*str, sysregs[i].name, op_strlen))
6434 {
6435 val = sysregs[i].regl | (sysregs[i].regh << 3);
6436 *str = op_end;
6437 break;
6438 }
6439 }
6440
6441 return val;
6442}
6443
6444/* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6445 value suitable for splatting into the AIF field of the instruction. */
6446
6447static int
6448parse_cps_flags (char **str)
6449{
6450 int val = 0;
6451 int saw_a_flag = 0;
6452 char *s = *str;
6453
6454 for (;;)
6455 switch (*s++)
6456 {
6457 case '\0': case ',':
6458 goto done;
6459
6460 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6461 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6462 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6463
6464 default:
6465 inst.error = _("unrecognized CPS flag");
6466 return FAIL;
6467 }
6468
6469 done:
6470 if (saw_a_flag == 0)
6471 {
6472 inst.error = _("missing CPS flags");
6473 return FAIL;
6474 }
6475
6476 *str = s - 1;
6477 return val;
6478}
6479
6480/* Parse an endian specifier ("BE" or "LE", case insensitive);
6481 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6482
6483static int
6484parse_endian_specifier (char **str)
6485{
6486 int little_endian;
6487 char *s = *str;
6488
6489 if (strncasecmp (s, "BE", 2))
6490 little_endian = 0;
6491 else if (strncasecmp (s, "LE", 2))
6492 little_endian = 1;
6493 else
6494 {
6495 inst.error = _("valid endian specifiers are be or le");
6496 return FAIL;
6497 }
6498
6499 if (ISALNUM (s[2]) || s[2] == '_')
6500 {
6501 inst.error = _("valid endian specifiers are be or le");
6502 return FAIL;
6503 }
6504
6505 *str = s + 2;
6506 return little_endian;
6507}
6508
6509/* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6510 value suitable for poking into the rotate field of an sxt or sxta
6511 instruction, or FAIL on error. */
6512
6513static int
6514parse_ror (char **str)
6515{
6516 int rot;
6517 char *s = *str;
6518
6519 if (strncasecmp (s, "ROR", 3) == 0)
6520 s += 3;
6521 else
6522 {
6523 inst.error = _("missing rotation field after comma");
6524 return FAIL;
6525 }
6526
6527 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6528 return FAIL;
6529
6530 switch (rot)
6531 {
6532 case 0: *str = s; return 0x0;
6533 case 8: *str = s; return 0x1;
6534 case 16: *str = s; return 0x2;
6535 case 24: *str = s; return 0x3;
6536
6537 default:
6538 inst.error = _("rotation can only be 0, 8, 16, or 24");
6539 return FAIL;
6540 }
6541}
6542
6543/* Parse a conditional code (from conds[] below). The value returned is in the
6544 range 0 .. 14, or FAIL. */
6545static int
6546parse_cond (char **str)
6547{
6548 char *q;
6549 const struct asm_cond *c;
6550 int n;
6551 /* Condition codes are always 2 characters, so matching up to
6552 3 characters is sufficient. */
6553 char cond[3];
6554
6555 q = *str;
6556 n = 0;
6557 while (ISALPHA (*q) && n < 3)
6558 {
6559 cond[n] = TOLOWER (*q);
6560 q++;
6561 n++;
6562 }
6563
6564 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6565 if (!c)
6566 {
6567 inst.error = _("condition required");
6568 return FAIL;
6569 }
6570
6571 *str = q;
6572 return c->value;
6573}
6574
6575/* Parse an option for a barrier instruction. Returns the encoding for the
6576 option, or FAIL. */
6577static int
6578parse_barrier (char **str)
6579{
6580 char *p, *q;
6581 const struct asm_barrier_opt *o;
6582
6583 p = q = *str;
6584 while (ISALPHA (*q))
6585 q++;
6586
6587 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6588 q - p);
6589 if (!o)
6590 return FAIL;
6591
6592 if (!mark_feature_used (&o->arch))
6593 return FAIL;
6594
6595 *str = q;
6596 return o->value;
6597}
6598
6599/* Parse the operands of a table branch instruction. Similar to a memory
6600 operand. */
static int
parse_tb (char **str)
{
  char * p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      inst.error = _("'[' expected");
      return FAIL;
    }

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  /* Base register.  */
  inst.operands[0].reg = reg;

  if (skip_past_comma (&p) == FAIL)
    {
      inst.error = _("',' expected");
      return FAIL;
    }

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  /* Index register.  */
  inst.operands[0].imm = reg;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* The only shift accepted here is "LSL #1".  */
      if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
	return FAIL;
      if (inst.relocs[0].exp.X_add_number != 1)
	{
	  inst.error = _("invalid shift");
	  return FAIL;
	}
      inst.operands[0].shifted = 1;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return FAIL;
    }
  *str = p;
  return SUCCESS;
}
6653
6654/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6655 information on the types the operands can take and how they are encoded.
6656 Up to four operands may be read; this function handles setting the
6657 ".present" field for each read operand itself.
6658 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6659 else returns FAIL. */
6660
static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  /* Dispatch on the form of the first operand; the case numbers refer
     to the layouts handled by do_neon_mov.  */
  if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
    {
      /* Cases 17 or 19.  */
      inst.operands[i].reg = val;
      inst.operands[i].isvec = 1;
      inst.operands[i].isscalar = 2;
      inst.operands[i].vectype = optype;
      /* The i++ advances to the next operand slot.  */
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 17: VMOV<c>.<dt> <Qd[idx]>, <Rt> */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;
	}
      else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
	{
	  /* Case 19: VMOV<c> <Qd[idx]>, <Qd[idx2]>, <Rt>, <Rt2> */
	  inst.operands[i].reg = val;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].isscalar = 2;
	  inst.operands[i].vectype = optype;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	    goto wanted_arm;

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	    goto wanted_arm;

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;
	}
      else
	{
	  first_error (_("expected ARM or MVE vector register"));
	  return FAIL;
	}
    }
  else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_VFD)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if (((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	    != FAIL)
	   || ((val = arm_typed_reg_parse (&ptr, REG_TYPE_MQ, &rtype, &optype))
	       != FAIL))
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* Case 5: a second core register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					    &optype)) != FAIL)
	       || ((val = arm_typed_reg_parse (&ptr, REG_TYPE_MQ, &rtype,
					       &optype)) != FAIL))
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	/* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	   Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	   Case 10: VMOV.F32 <Sd>, #<imm>
	   Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	/* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	   Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7, 16, 18.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
	{
	  /* Case 18: VMOV<c>.<dt> <Rt>, <Qn[idx]> */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 2;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_VFD)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      != FAIL)
	    {
	      /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;

	      if (rtype == REG_TYPE_VFS)
		{
		  /* Case 14.  */
		  i++;
		  if (skip_past_comma (&ptr) == FAIL)
		    goto wanted_comma;
		  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
						  &optype)) == FAIL)
		    {
		      first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		      return FAIL;
		    }
		  inst.operands[i].reg = val;
		  inst.operands[i].isreg = 1;
		  inst.operands[i].isvec = 1;
		  inst.operands[i].issingle = 1;
		  inst.operands[i].vectype = optype;
		  inst.operands[i].present = 1;
		}
	    }
	  else
	    {
	      if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ))
		  != FAIL)
		{
		  /* Case 16: VMOV<c> <Rt>, <Rt2>, <Qd[idx]>, <Qd[idx2]> */
		  inst.operands[i].reg = val;
		  inst.operands[i].isvec = 1;
		  inst.operands[i].isscalar = 2;
		  inst.operands[i].vectype = optype;
		  inst.operands[i++].present = 1;

		  if (skip_past_comma (&ptr) == FAIL)
		    goto wanted_comma;

		  if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ))
		      == FAIL)
		    {
		      first_error (_(reg_expected_msgs[REG_TYPE_MQ]));
		      return FAIL;
		    }
		  inst.operands[i].reg = val;
		  inst.operands[i].isvec = 1;
		  inst.operands[i].isscalar = 2;
		  inst.operands[i].vectype = optype;
		  inst.operands[i].present = 1;
		}
	      else
		{
		  first_error (_("VFP single, double or MVE vector register"
			       " expected"));
		  return FAIL;
		}
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6974
/* Use this macro when the operand constraints are different for ARM
   and THUMB (e.g. ldrd).  It packs the Thumb constraint code into the
   high 16 bits and the ARM constraint code into the low 16 bits;
   parse_operands unpacks the half appropriate to the current mode.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
  (((thumb_operand) << 16) | (arm_operand))
6979
/* Matcher codes for parse_operands.  Each code describes how to parse
   one operand.  Codes whose name begins with OP_o denote optional
   operands; every optional code must sort at or after OP_FIRST_OPTIONAL,
   which parse_operands uses to decide when to record a backtrack point.
   Mixed ARM/Thumb codes are built with MIX_ARM_THUMB_OPERANDS.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNDMQ,     /* Neon double precision (0..31) or MVE vector register.  */
  OP_RNDMQR,    /* Neon double precision (0..31), MVE vector or ARM register.
		 */
  OP_RNQ,	/* Neon quad precision register */
  OP_RNQMQ,	/* Neon quad or MVE vector register.  */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RVSD_COND,	/* VFP single, double precision register or condition code.  */
  OP_RVSDMQ,	/* VFP single, double precision or MVE vector register.  */
  OP_RNSD,      /* Neon single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNDQMQ,     /* Neon double, quad or MVE vector register.  */
  OP_RNDQMQR,   /* Neon double, quad, MVE vector or ARM register.  */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_RNSDQMQ,	/* Neon single, double or quad register or MVE vector register
		 */
  OP_RNSDQMQR,	/* Neon single, double or quad register, MVE vector register or
		   GPR (no SP/PC -- NOTE(review): original comment said
		   "no SP/SP"; verify the intended constraint) */
  OP_RMQ,	/* MVE vector register.  */
  OP_RMQRZ,	/* MVE vector or ARM register including ZR.  */
  OP_RMQRR,     /* MVE vector or ARM register.  */

  /* New operands for Armv8.1-M Mainline.  */
  OP_LR,	/* ARM LR register */
  OP_RRe,	/* ARM register, only even numbered.  */
  OP_RRo,	/* ARM register, only odd numbered, not r13 or r15.  */
  OP_RRnpcsp_I32, /* ARM register (no BadReg) or literal 1 .. 32 */
  OP_RR_ZR,	/* ARM register or ZR but no PC */

  OP_REGLST,	/* ARM register list */
  OP_CLRMLST,	/* CLRM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */
  OP_VRSDVLST,  /* VFP single or double-precision register list and VPR */
  OP_MSTRLST2,	/* MVE vector list with two elements.  */
  OP_MSTRLST4,	/* MVE vector list with four elements.  */

  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
  OP_RSVDMQ_FI0, /* VFP S, D, MVE vector register or floating point immediate
		    zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNSDQ_RNSC_MQ, /* Vector S, D or Q reg, Neon scalar or MVE vector register.
		     */
  OP_RNSDQ_RNSC_MQ_RR, /* Vector S, D or Q reg, or MVE vector reg, or Neon
			  scalar, or ARM register.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC_RR, /* Neon D or Q reg, Neon scalar, or ARM register.  */
  OP_RNDQMQ_RNSC_RR, /* Neon D or Q reg, Neon scalar, MVE vector or ARM
			register.  */
  OP_RNDQMQ_RNSC, /* Neon D, Q or MVE vector reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  /* Neon D, Q or MVE vector register, or big immediate for logic and VMVN.  */
  OP_RNDQMQ_Ibig,
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RNDQMQ_I63b_RR, /* Neon D or Q reg, immediate for shift, MVE vector or
			ARM register.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
  OP_VLDR,	/* VLDR operand.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I48_I64,	/*		   48 or 64 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRMVE,	/* Memory address expression for MVE's VSTR/VLDR.  */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_EXPs,	/* same, with optional non-first operand relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff.  */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,	 /*				1 .. 32 */
  OP_oI32z,	 /*				0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oLR,	 /* ARM LR register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQMQ,     /* Optional Neon double, quad or MVE vector register.  */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oRNSDQMQ,	 /* Optional single, double or quad register or MVE vector
		    register.  */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  OP_oRMQRZ,	/* optional MVE vector or ARM register including ZR.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* Boundary between mandatory and optional operand codes; used by
     parse_operands to decide where backtracking may start.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
7165
7166/* Generic instruction operand parser. This does no encoding and no
7167 semantic validation; it merely squirrels values away in the inst
7168 structure. Returns SUCCESS or FAIL depending on whether the
7169 specified grammar matched. */
7170static int
7171parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
7172{
7173 unsigned const int *upat = pattern;
7174 char *backtrack_pos = 0;
7175 const char *backtrack_error = 0;
7176 int i, val = 0, backtrack_index = 0;
7177 enum arm_reg_type rtype;
7178 parse_operand_result result;
7179 unsigned int op_parse_code;
7180 bfd_boolean partial_match;
7181
7182#define po_char_or_fail(chr) \
7183 do \
7184 { \
7185 if (skip_past_char (&str, chr) == FAIL) \
7186 goto bad_args; \
7187 } \
7188 while (0)
7189
7190#define po_reg_or_fail(regtype) \
7191 do \
7192 { \
7193 val = arm_typed_reg_parse (& str, regtype, & rtype, \
7194 & inst.operands[i].vectype); \
7195 if (val == FAIL) \
7196 { \
7197 first_error (_(reg_expected_msgs[regtype])); \
7198 goto failure; \
7199 } \
7200 inst.operands[i].reg = val; \
7201 inst.operands[i].isreg = 1; \
7202 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
7203 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
7204 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
7205 || rtype == REG_TYPE_VFD \
7206 || rtype == REG_TYPE_NQ); \
7207 inst.operands[i].iszr = (rtype == REG_TYPE_ZR); \
7208 } \
7209 while (0)
7210
7211#define po_reg_or_goto(regtype, label) \
7212 do \
7213 { \
7214 val = arm_typed_reg_parse (& str, regtype, & rtype, \
7215 & inst.operands[i].vectype); \
7216 if (val == FAIL) \
7217 goto label; \
7218 \
7219 inst.operands[i].reg = val; \
7220 inst.operands[i].isreg = 1; \
7221 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
7222 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
7223 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
7224 || rtype == REG_TYPE_VFD \
7225 || rtype == REG_TYPE_NQ); \
7226 inst.operands[i].iszr = (rtype == REG_TYPE_ZR); \
7227 } \
7228 while (0)
7229
7230#define po_imm_or_fail(min, max, popt) \
7231 do \
7232 { \
7233 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
7234 goto failure; \
7235 inst.operands[i].imm = val; \
7236 } \
7237 while (0)
7238
7239#define po_imm1_or_imm2_or_fail(imm1, imm2, popt) \
7240 do \
7241 { \
7242 expressionS exp; \
7243 my_get_expression (&exp, &str, popt); \
7244 if (exp.X_op != O_constant) \
7245 { \
7246 inst.error = _("constant expression required"); \
7247 goto failure; \
7248 } \
7249 if (exp.X_add_number != imm1 && exp.X_add_number != imm2) \
7250 { \
7251 inst.error = _("immediate value 48 or 64 expected"); \
7252 goto failure; \
7253 } \
7254 inst.operands[i].imm = exp.X_add_number; \
7255 } \
7256 while (0)
7257
7258#define po_scalar_or_goto(elsz, label, reg_type) \
7259 do \
7260 { \
7261 val = parse_scalar (& str, elsz, & inst.operands[i].vectype, \
7262 reg_type); \
7263 if (val == FAIL) \
7264 goto label; \
7265 inst.operands[i].reg = val; \
7266 inst.operands[i].isscalar = 1; \
7267 } \
7268 while (0)
7269
7270#define po_misc_or_fail(expr) \
7271 do \
7272 { \
7273 if (expr) \
7274 goto failure; \
7275 } \
7276 while (0)
7277
7278#define po_misc_or_fail_no_backtrack(expr) \
7279 do \
7280 { \
7281 result = expr; \
7282 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
7283 backtrack_pos = 0; \
7284 if (result != PARSE_OPERAND_SUCCESS) \
7285 goto failure; \
7286 } \
7287 while (0)
7288
7289#define po_barrier_or_imm(str) \
7290 do \
7291 { \
7292 val = parse_barrier (&str); \
7293 if (val == FAIL && ! ISALPHA (*str)) \
7294 goto immediate; \
7295 if (val == FAIL \
7296 /* ISB can only take SY as an option. */ \
7297 || ((inst.instruction & 0xf0) == 0x60 \
7298 && val != 0xf)) \
7299 { \
7300 inst.error = _("invalid barrier type"); \
7301 backtrack_pos = 0; \
7302 goto failure; \
7303 } \
7304 } \
7305 while (0)
7306
7307 skip_whitespace (str);
7308
7309 for (i = 0; upat[i] != OP_stop; i++)
7310 {
7311 op_parse_code = upat[i];
7312 if (op_parse_code >= 1<<16)
7313 op_parse_code = thumb ? (op_parse_code >> 16)
7314 : (op_parse_code & ((1<<16)-1));
7315
7316 if (op_parse_code >= OP_FIRST_OPTIONAL)
7317 {
7318 /* Remember where we are in case we need to backtrack. */
7319 backtrack_pos = str;
7320 backtrack_error = inst.error;
7321 backtrack_index = i;
7322 }
7323
7324 if (i > 0 && (i > 1 || inst.operands[0].present))
7325 po_char_or_fail (',');
7326
7327 switch (op_parse_code)
7328 {
7329 /* Registers */
7330 case OP_oRRnpc:
7331 case OP_oRRnpcsp:
7332 case OP_RRnpc:
7333 case OP_RRnpcsp:
7334 case OP_oRR:
7335 case OP_RRe:
7336 case OP_RRo:
7337 case OP_LR:
7338 case OP_oLR:
7339 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
7340 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
7341 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
7342 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
7343 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
7344 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
7345 case OP_oRND:
7346 case OP_RNDMQR:
7347 po_reg_or_goto (REG_TYPE_RN, try_rndmq);
7348 break;
7349 try_rndmq:
7350 case OP_RNDMQ:
7351 po_reg_or_goto (REG_TYPE_MQ, try_rnd);
7352 break;
7353 try_rnd:
7354 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
7355 case OP_RVC:
7356 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
7357 break;
7358 /* Also accept generic coprocessor regs for unknown registers. */
7359 coproc_reg:
7360 po_reg_or_goto (REG_TYPE_CN, vpr_po);
7361 break;
7362 /* Also accept P0 or p0 for VPR.P0. Since P0 is already an
7363 existing register with a value of 0, this seems like the
7364 best way to parse P0. */
7365 vpr_po:
7366 if (strncasecmp (str, "P0", 2) == 0)
7367 {
7368 str += 2;
7369 inst.operands[i].isreg = 1;
7370 inst.operands[i].reg = 13;
7371 }
7372 else
7373 goto failure;
7374 break;
7375 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
7376 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
7377 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
7378 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
7379 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
7380 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
7381 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
7382 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
7383 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
7384 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
7385 case OP_oRNQ:
7386 case OP_RNQMQ:
7387 po_reg_or_goto (REG_TYPE_MQ, try_nq);
7388 break;
7389 try_nq:
7390 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
7391 case OP_RNSD: po_reg_or_fail (REG_TYPE_NSD); break;
7392 case OP_RNDQMQR:
7393 po_reg_or_goto (REG_TYPE_RN, try_rndqmq);
7394 break;
7395 try_rndqmq:
7396 case OP_oRNDQMQ:
7397 case OP_RNDQMQ:
7398 po_reg_or_goto (REG_TYPE_MQ, try_rndq);
7399 break;
7400 try_rndq:
7401 case OP_oRNDQ:
7402 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
7403 case OP_RVSDMQ:
7404 po_reg_or_goto (REG_TYPE_MQ, try_rvsd);
7405 break;
7406 try_rvsd:
7407 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
7408 case OP_RVSD_COND:
7409 po_reg_or_goto (REG_TYPE_VFSD, try_cond);
7410 break;
7411 case OP_oRNSDQ:
7412 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
7413 case OP_RNSDQMQR:
7414 po_reg_or_goto (REG_TYPE_RN, try_mq);
7415 break;
7416 try_mq:
7417 case OP_oRNSDQMQ:
7418 case OP_RNSDQMQ:
7419 po_reg_or_goto (REG_TYPE_MQ, try_nsdq2);
7420 break;
7421 try_nsdq2:
7422 po_reg_or_fail (REG_TYPE_NSDQ);
7423 inst.error = 0;
7424 break;
7425 case OP_RMQRR:
7426 po_reg_or_goto (REG_TYPE_RN, try_rmq);
7427 break;
7428 try_rmq:
7429 case OP_RMQ:
7430 po_reg_or_fail (REG_TYPE_MQ);
7431 break;
7432 /* Neon scalar. Using an element size of 8 means that some invalid
7433 scalars are accepted here, so deal with those in later code. */
7434 case OP_RNSC: po_scalar_or_goto (8, failure, REG_TYPE_VFD); break;
7435
7436 case OP_RNDQ_I0:
7437 {
7438 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
7439 break;
7440 try_imm0:
7441 po_imm_or_fail (0, 0, TRUE);
7442 }
7443 break;
7444
7445 case OP_RVSD_I0:
7446 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
7447 break;
7448
7449 case OP_RSVDMQ_FI0:
7450 po_reg_or_goto (REG_TYPE_MQ, try_rsvd_fi0);
7451 break;
7452 try_rsvd_fi0:
7453 case OP_RSVD_FI0:
7454 {
7455 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
7456 break;
7457 try_ifimm0:
7458 if (parse_ifimm_zero (&str))
7459 inst.operands[i].imm = 0;
7460 else
7461 {
7462 inst.error
7463 = _("only floating point zero is allowed as immediate value");
7464 goto failure;
7465 }
7466 }
7467 break;
7468
7469 case OP_RR_RNSC:
7470 {
7471 po_scalar_or_goto (8, try_rr, REG_TYPE_VFD);
7472 break;
7473 try_rr:
7474 po_reg_or_fail (REG_TYPE_RN);
7475 }
7476 break;
7477
7478 case OP_RNSDQ_RNSC_MQ_RR:
7479 po_reg_or_goto (REG_TYPE_RN, try_rnsdq_rnsc_mq);
7480 break;
7481 try_rnsdq_rnsc_mq:
7482 case OP_RNSDQ_RNSC_MQ:
7483 po_reg_or_goto (REG_TYPE_MQ, try_rnsdq_rnsc);
7484 break;
7485 try_rnsdq_rnsc:
7486 case OP_RNSDQ_RNSC:
7487 {
7488 po_scalar_or_goto (8, try_nsdq, REG_TYPE_VFD);
7489 inst.error = 0;
7490 break;
7491 try_nsdq:
7492 po_reg_or_fail (REG_TYPE_NSDQ);
7493 inst.error = 0;
7494 }
7495 break;
7496
7497 case OP_RNSD_RNSC:
7498 {
7499 po_scalar_or_goto (8, try_s_scalar, REG_TYPE_VFD);
7500 break;
7501 try_s_scalar:
7502 po_scalar_or_goto (4, try_nsd, REG_TYPE_VFS);
7503 break;
7504 try_nsd:
7505 po_reg_or_fail (REG_TYPE_NSD);
7506 }
7507 break;
7508
7509 case OP_RNDQMQ_RNSC_RR:
7510 po_reg_or_goto (REG_TYPE_MQ, try_rndq_rnsc_rr);
7511 break;
7512 try_rndq_rnsc_rr:
7513 case OP_RNDQ_RNSC_RR:
7514 po_reg_or_goto (REG_TYPE_RN, try_rndq_rnsc);
7515 break;
7516 case OP_RNDQMQ_RNSC:
7517 po_reg_or_goto (REG_TYPE_MQ, try_rndq_rnsc);
7518 break;
7519 try_rndq_rnsc:
7520 case OP_RNDQ_RNSC:
7521 {
7522 po_scalar_or_goto (8, try_ndq, REG_TYPE_VFD);
7523 break;
7524 try_ndq:
7525 po_reg_or_fail (REG_TYPE_NDQ);
7526 }
7527 break;
7528
7529 case OP_RND_RNSC:
7530 {
7531 po_scalar_or_goto (8, try_vfd, REG_TYPE_VFD);
7532 break;
7533 try_vfd:
7534 po_reg_or_fail (REG_TYPE_VFD);
7535 }
7536 break;
7537
7538 case OP_VMOV:
7539 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
7540 not careful then bad things might happen. */
7541 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
7542 break;
7543
7544 case OP_RNDQMQ_Ibig:
7545 po_reg_or_goto (REG_TYPE_MQ, try_rndq_ibig);
7546 break;
7547 try_rndq_ibig:
7548 case OP_RNDQ_Ibig:
7549 {
7550 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
7551 break;
7552 try_immbig:
7553 /* There's a possibility of getting a 64-bit immediate here, so
7554 we need special handling. */
7555 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
7556 == FAIL)
7557 {
7558 inst.error = _("immediate value is out of range");
7559 goto failure;
7560 }
7561 }
7562 break;
7563
7564 case OP_RNDQMQ_I63b_RR:
7565 po_reg_or_goto (REG_TYPE_MQ, try_rndq_i63b_rr);
7566 break;
7567 try_rndq_i63b_rr:
7568 po_reg_or_goto (REG_TYPE_RN, try_rndq_i63b);
7569 break;
7570 try_rndq_i63b:
7571 case OP_RNDQ_I63b:
7572 {
7573 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
7574 break;
7575 try_shimm:
7576 po_imm_or_fail (0, 63, TRUE);
7577 }
7578 break;
7579
7580 case OP_RRnpcb:
7581 po_char_or_fail ('[');
7582 po_reg_or_fail (REG_TYPE_RN);
7583 po_char_or_fail (']');
7584 break;
7585
7586 case OP_RRnpctw:
7587 case OP_RRw:
7588 case OP_oRRw:
7589 po_reg_or_fail (REG_TYPE_RN);
7590 if (skip_past_char (&str, '!') == SUCCESS)
7591 inst.operands[i].writeback = 1;
7592 break;
7593
7594 /* Immediates */
7595 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
7596 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
7597 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
7598 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
7599 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
7600 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
7601 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
7602 case OP_I48_I64: po_imm1_or_imm2_or_fail (48, 64, FALSE); break;
7603 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
7604 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
7605 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
7606 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
7607 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
7608
7609 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
7610 case OP_oI7b:
7611 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
7612 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
7613 case OP_oI31b:
7614 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
7615 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
7616 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
7617 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
7618
7619 /* Immediate variants */
7620 case OP_oI255c:
7621 po_char_or_fail ('{');
7622 po_imm_or_fail (0, 255, TRUE);
7623 po_char_or_fail ('}');
7624 break;
7625
7626 case OP_I31w:
7627 /* The expression parser chokes on a trailing !, so we have
7628 to find it first and zap it. */
7629 {
7630 char *s = str;
7631 while (*s && *s != ',')
7632 s++;
7633 if (s[-1] == '!')
7634 {
7635 s[-1] = '\0';
7636 inst.operands[i].writeback = 1;
7637 }
7638 po_imm_or_fail (0, 31, TRUE);
7639 if (str == s - 1)
7640 str = s;
7641 }
7642 break;
7643
7644 /* Expressions */
7645 case OP_EXPi: EXPi:
7646 po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7647 GE_OPT_PREFIX));
7648 break;
7649
7650 case OP_EXP:
7651 po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7652 GE_NO_PREFIX));
7653 break;
7654
7655 case OP_EXPr: EXPr:
7656 po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7657 GE_NO_PREFIX));
7658 if (inst.relocs[0].exp.X_op == O_symbol)
7659 {
7660 val = parse_reloc (&str);
7661 if (val == -1)
7662 {
7663 inst.error = _("unrecognized relocation suffix");
7664 goto failure;
7665 }
7666 else if (val != BFD_RELOC_UNUSED)
7667 {
7668 inst.operands[i].imm = val;
7669 inst.operands[i].hasreloc = 1;
7670 }
7671 }
7672 break;
7673
7674 case OP_EXPs:
7675 po_misc_or_fail (my_get_expression (&inst.relocs[i].exp, &str,
7676 GE_NO_PREFIX));
7677 if (inst.relocs[i].exp.X_op == O_symbol)
7678 {
7679 inst.operands[i].hasreloc = 1;
7680 }
7681 else if (inst.relocs[i].exp.X_op == O_constant)
7682 {
7683 inst.operands[i].imm = inst.relocs[i].exp.X_add_number;
7684 inst.operands[i].hasreloc = 0;
7685 }
7686 break;
7687
7688 /* Operand for MOVW or MOVT. */
7689 case OP_HALF:
7690 po_misc_or_fail (parse_half (&str));
7691 break;
7692
7693 /* Register or expression. */
7694 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
7695 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
7696
7697 /* Register or immediate. */
7698 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
7699 I0: po_imm_or_fail (0, 0, FALSE); break;
7700
7701 case OP_RRnpcsp_I32: po_reg_or_goto (REG_TYPE_RN, I32); break;
7702 I32: po_imm_or_fail (1, 32, FALSE); break;
7703
7704 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
7705 IF:
7706 if (!is_immediate_prefix (*str))
7707 goto bad_args;
7708 str++;
7709 val = parse_fpa_immediate (&str);
7710 if (val == FAIL)
7711 goto failure;
7712 /* FPA immediates are encoded as registers 8-15.
7713 parse_fpa_immediate has already applied the offset. */
7714 inst.operands[i].reg = val;
7715 inst.operands[i].isreg = 1;
7716 break;
7717
7718 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
7719 I32z: po_imm_or_fail (0, 32, FALSE); break;
7720
7721 /* Two kinds of register. */
7722 case OP_RIWR_RIWC:
7723 {
7724 struct reg_entry *rege = arm_reg_parse_multi (&str);
7725 if (!rege
7726 || (rege->type != REG_TYPE_MMXWR
7727 && rege->type != REG_TYPE_MMXWC
7728 && rege->type != REG_TYPE_MMXWCG))
7729 {
7730 inst.error = _("iWMMXt data or control register expected");
7731 goto failure;
7732 }
7733 inst.operands[i].reg = rege->number;
7734 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
7735 }
7736 break;
7737
7738 case OP_RIWC_RIWG:
7739 {
7740 struct reg_entry *rege = arm_reg_parse_multi (&str);
7741 if (!rege
7742 || (rege->type != REG_TYPE_MMXWC
7743 && rege->type != REG_TYPE_MMXWCG))
7744 {
7745 inst.error = _("iWMMXt control register expected");
7746 goto failure;
7747 }
7748 inst.operands[i].reg = rege->number;
7749 inst.operands[i].isreg = 1;
7750 }
7751 break;
7752
7753 /* Misc */
7754 case OP_CPSF: val = parse_cps_flags (&str); break;
7755 case OP_ENDI: val = parse_endian_specifier (&str); break;
7756 case OP_oROR: val = parse_ror (&str); break;
7757 try_cond:
7758 case OP_COND: val = parse_cond (&str); break;
7759 case OP_oBARRIER_I15:
7760 po_barrier_or_imm (str); break;
7761 immediate:
7762 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
7763 goto failure;
7764 break;
7765
7766 case OP_wPSR:
7767 case OP_rPSR:
7768 po_reg_or_goto (REG_TYPE_RNB, try_psr);
7769 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7770 {
7771 inst.error = _("Banked registers are not available with this "
7772 "architecture.");
7773 goto failure;
7774 }
7775 break;
7776 try_psr:
7777 val = parse_psr (&str, op_parse_code == OP_wPSR);
7778 break;
7779
7780 case OP_VLDR:
7781 po_reg_or_goto (REG_TYPE_VFSD, try_sysreg);
7782 break;
7783 try_sysreg:
7784 val = parse_sys_vldr_vstr (&str);
7785 break;
7786
7787 case OP_APSR_RR:
7788 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7789 break;
7790 try_apsr:
7791 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7792 instruction). */
7793 if (strncasecmp (str, "APSR_", 5) == 0)
7794 {
7795 unsigned found = 0;
7796 str += 5;
7797 while (found < 15)
7798 switch (*str++)
7799 {
7800 case 'c': found = (found & 1) ? 16 : found | 1; break;
7801 case 'n': found = (found & 2) ? 16 : found | 2; break;
7802 case 'z': found = (found & 4) ? 16 : found | 4; break;
7803 case 'v': found = (found & 8) ? 16 : found | 8; break;
7804 default: found = 16;
7805 }
7806 if (found != 15)
7807 goto failure;
7808 inst.operands[i].isvec = 1;
7809 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7810 inst.operands[i].reg = REG_PC;
7811 }
7812 else
7813 goto failure;
7814 break;
7815
7816 case OP_TB:
7817 po_misc_or_fail (parse_tb (&str));
7818 break;
7819
7820 /* Register lists. */
7821 case OP_REGLST:
7822 val = parse_reg_list (&str, REGLIST_RN);
7823 if (*str == '^')
7824 {
7825 inst.operands[i].writeback = 1;
7826 str++;
7827 }
7828 break;
7829
7830 case OP_CLRMLST:
7831 val = parse_reg_list (&str, REGLIST_CLRM);
7832 break;
7833
7834 case OP_VRSLST:
7835 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S,
7836 &partial_match);
7837 break;
7838
7839 case OP_VRDLST:
7840 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D,
7841 &partial_match);
7842 break;
7843
7844 case OP_VRSDLST:
7845 /* Allow Q registers too. */
7846 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7847 REGLIST_NEON_D, &partial_match);
7848 if (val == FAIL)
7849 {
7850 inst.error = NULL;
7851 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7852 REGLIST_VFP_S, &partial_match);
7853 inst.operands[i].issingle = 1;
7854 }
7855 break;
7856
7857 case OP_VRSDVLST:
7858 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7859 REGLIST_VFP_D_VPR, &partial_match);
7860 if (val == FAIL && !partial_match)
7861 {
7862 inst.error = NULL;
7863 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7864 REGLIST_VFP_S_VPR, &partial_match);
7865 inst.operands[i].issingle = 1;
7866 }
7867 break;
7868
7869 case OP_NRDLST:
7870 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7871 REGLIST_NEON_D, &partial_match);
7872 break;
7873
7874 case OP_MSTRLST4:
7875 case OP_MSTRLST2:
7876 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7877 1, &inst.operands[i].vectype);
7878 if (val != (((op_parse_code == OP_MSTRLST2) ? 3 : 7) << 5 | 0xe))
7879 goto failure;
7880 break;
7881 case OP_NSTRLST:
7882 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7883 0, &inst.operands[i].vectype);
7884 break;
7885
7886 /* Addressing modes */
7887 case OP_ADDRMVE:
7888 po_misc_or_fail (parse_address_group_reloc (&str, i, GROUP_MVE));
7889 break;
7890
7891 case OP_ADDR:
7892 po_misc_or_fail (parse_address (&str, i));
7893 break;
7894
7895 case OP_ADDRGLDR:
7896 po_misc_or_fail_no_backtrack (
7897 parse_address_group_reloc (&str, i, GROUP_LDR));
7898 break;
7899
7900 case OP_ADDRGLDRS:
7901 po_misc_or_fail_no_backtrack (
7902 parse_address_group_reloc (&str, i, GROUP_LDRS));
7903 break;
7904
7905 case OP_ADDRGLDC:
7906 po_misc_or_fail_no_backtrack (
7907 parse_address_group_reloc (&str, i, GROUP_LDC));
7908 break;
7909
7910 case OP_SH:
7911 po_misc_or_fail (parse_shifter_operand (&str, i));
7912 break;
7913
7914 case OP_SHG:
7915 po_misc_or_fail_no_backtrack (
7916 parse_shifter_operand_group_reloc (&str, i));
7917 break;
7918
7919 case OP_oSHll:
7920 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7921 break;
7922
7923 case OP_oSHar:
7924 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7925 break;
7926
7927 case OP_oSHllar:
7928 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7929 break;
7930
7931 case OP_RMQRZ:
7932 case OP_oRMQRZ:
7933 po_reg_or_goto (REG_TYPE_MQ, try_rr_zr);
7934 break;
7935
7936 case OP_RR_ZR:
7937 try_rr_zr:
7938 po_reg_or_goto (REG_TYPE_RN, ZR);
7939 break;
7940 ZR:
7941 po_reg_or_fail (REG_TYPE_ZR);
7942 break;
7943
7944 default:
7945 as_fatal (_("unhandled operand code %d"), op_parse_code);
7946 }
7947
7948 /* Various value-based sanity checks and shared operations. We
7949 do not signal immediate failures for the register constraints;
7950 this allows a syntax error to take precedence. */
7951 switch (op_parse_code)
7952 {
7953 case OP_oRRnpc:
7954 case OP_RRnpc:
7955 case OP_RRnpcb:
7956 case OP_RRw:
7957 case OP_oRRw:
7958 case OP_RRnpc_I0:
7959 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7960 inst.error = BAD_PC;
7961 break;
7962
7963 case OP_oRRnpcsp:
7964 case OP_RRnpcsp:
7965 case OP_RRnpcsp_I32:
7966 if (inst.operands[i].isreg)
7967 {
7968 if (inst.operands[i].reg == REG_PC)
7969 inst.error = BAD_PC;
7970 else if (inst.operands[i].reg == REG_SP
7971 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7972 relaxed since ARMv8-A. */
7973 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
7974 {
7975 gas_assert (thumb);
7976 inst.error = BAD_SP;
7977 }
7978 }
7979 break;
7980
7981 case OP_RRnpctw:
7982 if (inst.operands[i].isreg
7983 && inst.operands[i].reg == REG_PC
7984 && (inst.operands[i].writeback || thumb))
7985 inst.error = BAD_PC;
7986 break;
7987
7988 case OP_RVSD_COND:
7989 case OP_VLDR:
7990 if (inst.operands[i].isreg)
7991 break;
7992 /* fall through. */
7993
7994 case OP_CPSF:
7995 case OP_ENDI:
7996 case OP_oROR:
7997 case OP_wPSR:
7998 case OP_rPSR:
7999 case OP_COND:
8000 case OP_oBARRIER_I15:
8001 case OP_REGLST:
8002 case OP_CLRMLST:
8003 case OP_VRSLST:
8004 case OP_VRDLST:
8005 case OP_VRSDLST:
8006 case OP_VRSDVLST:
8007 case OP_NRDLST:
8008 case OP_NSTRLST:
8009 case OP_MSTRLST2:
8010 case OP_MSTRLST4:
8011 if (val == FAIL)
8012 goto failure;
8013 inst.operands[i].imm = val;
8014 break;
8015
8016 case OP_LR:
8017 case OP_oLR:
8018 if (inst.operands[i].reg != REG_LR)
8019 inst.error = _("operand must be LR register");
8020 break;
8021
8022 case OP_RMQRZ:
8023 case OP_oRMQRZ:
8024 case OP_RR_ZR:
8025 if (!inst.operands[i].iszr && inst.operands[i].reg == REG_PC)
8026 inst.error = BAD_PC;
8027 break;
8028
8029 case OP_RRe:
8030 if (inst.operands[i].isreg
8031 && (inst.operands[i].reg & 0x00000001) != 0)
8032 inst.error = BAD_ODD;
8033 break;
8034
8035 case OP_RRo:
8036 if (inst.operands[i].isreg)
8037 {
8038 if ((inst.operands[i].reg & 0x00000001) != 1)
8039 inst.error = BAD_EVEN;
8040 else if (inst.operands[i].reg == REG_SP)
8041 as_tsktsk (MVE_BAD_SP);
8042 else if (inst.operands[i].reg == REG_PC)
8043 inst.error = BAD_PC;
8044 }
8045 break;
8046
8047 default:
8048 break;
8049 }
8050
8051 /* If we get here, this operand was successfully parsed. */
8052 inst.operands[i].present = 1;
8053 continue;
8054
8055 bad_args:
8056 inst.error = BAD_ARGS;
8057
8058 failure:
8059 if (!backtrack_pos)
8060 {
8061 /* The parse routine should already have set inst.error, but set a
8062 default here just in case. */
8063 if (!inst.error)
8064 inst.error = BAD_SYNTAX;
8065 return FAIL;
8066 }
8067
8068 /* Do not backtrack over a trailing optional argument that
8069 absorbed some text. We will only fail again, with the
8070 'garbage following instruction' error message, which is
8071 probably less helpful than the current one. */
8072 if (backtrack_index == i && backtrack_pos != str
8073 && upat[i+1] == OP_stop)
8074 {
8075 if (!inst.error)
8076 inst.error = BAD_SYNTAX;
8077 return FAIL;
8078 }
8079
8080 /* Try again, skipping the optional argument at backtrack_pos. */
8081 str = backtrack_pos;
8082 inst.error = backtrack_error;
8083 inst.operands[backtrack_index].present = 0;
8084 i = backtrack_index;
8085 backtrack_pos = 0;
8086 }
8087
8088 /* Check that we have parsed all the arguments. */
8089 if (*str != '\0' && !inst.error)
8090 inst.error = _("garbage following instruction");
8091
8092 return inst.error ? FAIL : SUCCESS;
8093}
8094
8095#undef po_char_or_fail
8096#undef po_reg_or_fail
8097#undef po_reg_or_goto
8098#undef po_imm_or_fail
8099#undef po_scalar_or_fail
8100#undef po_barrier_or_imm
8101
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR in inst.error and `return' from the
   *enclosing* function; hence it may only be used inside void
   encoding routines.  EXPR is evaluated at most once.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
8113
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.

   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
   places, while the restriction on REG_SP was relaxed since ARMv8-A.

   On failure this sets inst.error and returns from the *enclosing*
   function.  Note REG is evaluated more than once, so pass a plain
   variable, not an expression with side effects.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_PC)					\
     {							\
       inst.error = BAD_PC;				\
       return;						\
     }							\
   else if (reg == REG_SP				\
	    && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
     {							\
       inst.error = BAD_SP;				\
       return;						\
     }							\
  while (0)
8134
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  Only warns when -mwarn-deprecated is in effect;
   this never sets inst.error or aborts encoding.  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
       as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
8142
8143/* Functions for operand encoding. ARM, then Thumb. */
8144
8145#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
8146
8147/* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
8148
8149 The only binary encoding difference is the Coprocessor number. Coprocessor
8150 9 is used for half-precision calculations or conversions. The format of the
8151 instruction is the same as the equivalent Coprocessor 10 instruction that
8152 exists for Single-Precision operation. */
8153
8154static void
8155do_scalar_fp16_v82_encode (void)
8156{
8157 if (inst.cond < COND_ALWAYS)
8158 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
8159 " the behaviour is UNPREDICTABLE"));
8160 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
8161 _(BAD_FP16));
8162
8163 inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
8164 mark_feature_used (&arm_ext_fp16);
8165}
8166
8167/* If VAL can be encoded in the immediate field of an ARM instruction,
8168 return the encoded form. Otherwise, return FAIL. */
8169
8170static unsigned int
8171encode_arm_immediate (unsigned int val)
8172{
8173 unsigned int a, i;
8174
8175 if (val <= 0xff)
8176 return val;
8177
8178 for (i = 2; i < 32; i += 2)
8179 if ((a = rotate_left (val, i)) <= 0xff)
8180 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
8181
8182 return FAIL;
8183}
8184
8185/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
8186 return the encoded form. Otherwise, return FAIL. */
8187static unsigned int
8188encode_thumb32_immediate (unsigned int val)
8189{
8190 unsigned int a, i;
8191
8192 if (val <= 0xff)
8193 return val;
8194
8195 for (i = 1; i <= 24; i++)
8196 {
8197 a = val >> i;
8198 if ((val & ~(0xff << i)) == 0)
8199 return ((val >> i) & 0x7f) | ((32 - i) << 7);
8200 }
8201
8202 a = val & 0xff;
8203 if (val == ((a << 16) | a))
8204 return 0x100 | a;
8205 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
8206 return 0x300 | a;
8207
8208 a = val & 0xff00;
8209 if (val == ((a << 16) | a))
8210 return 0x200 | (a >> 8);
8211
8212 return FAIL;
8213}
8214/* Encode a VFP SP or DP register number into inst.instruction. */
8215
8216static void
8217encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
8218{
8219 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
8220 && reg > 15)
8221 {
8222 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
8223 {
8224 if (thumb_mode)
8225 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
8226 fpu_vfp_ext_d32);
8227 else
8228 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
8229 fpu_vfp_ext_d32);
8230 }
8231 else
8232 {
8233 first_error (_("D register out of range for selected VFP version"));
8234 return;
8235 }
8236 }
8237
8238 switch (pos)
8239 {
8240 case VFP_REG_Sd:
8241 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
8242 break;
8243
8244 case VFP_REG_Sn:
8245 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
8246 break;
8247
8248 case VFP_REG_Sm:
8249 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
8250 break;
8251
8252 case VFP_REG_Dd:
8253 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
8254 break;
8255
8256 case VFP_REG_Dn:
8257 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
8258 break;
8259
8260 case VFP_REG_Dm:
8261 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
8262 break;
8263
8264 default:
8265 abort ();
8266 }
8267}
8268
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix.  */
static void
encode_arm_shift (int i)
{
  /* register-shifted register.  */
  if (inst.operands[i].immisreg)
    {
      int op_index;
      for (op_index = 0; op_index <= i; ++op_index)
	{
	  /* Check the operand only when it's presented.  In pre-UAL syntax,
	     if the destination register is the same as the first operand, two
	     register form of the instruction can be used.  */
	  if (inst.operands[op_index].present && inst.operands[op_index].isreg
	      && inst.operands[op_index].reg == REG_PC)
	    as_warn (UNPRED_REG ("r15"));
	}

      /* PC as the shift-count register is also UNPREDICTABLE.  */
      if (inst.operands[i].imm == REG_PC)
	as_warn (UNPRED_REG ("r15"));
    }

  /* RRX is encoded as ROR with a zero immediate shift count.  */
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 5;
  else
    {
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
	{
	  inst.instruction |= SHIFT_BY_REG;
	  /* Shift-count register goes in bits [11:8].  */
	  inst.instruction |= inst.operands[i].imm << 8;
	}
      else
	/* Immediate shift amount is inserted later by md_apply_fix.  */
	inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
    }
}
8306
8307static void
8308encode_arm_shifter_operand (int i)
8309{
8310 if (inst.operands[i].isreg)
8311 {
8312 inst.instruction |= inst.operands[i].reg;
8313 encode_arm_shift (i);
8314 }
8315 else
8316 {
8317 inst.instruction |= INST_IMMEDIATE;
8318 if (inst.relocs[0].type != BFD_RELOC_ARM_IMMEDIATE)
8319 inst.instruction |= inst.operands[i].imm;
8320 }
8321}
8322
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register, the P (pre-index) and W (writeback)
   bits for operand I, and diagnoses the index modes a T-variant
   (user-mode) instruction cannot use.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register Rn goes in bits [19:16].  */
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* T instructions are encoded as post-indexed with W set.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Writeback (or post-index) with Rn == Rd/Rt is UNPREDICTABLE; the
     register fields are compared directly from the encoded bits.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
8365
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  /* RRX is encoded as ROR with a zero shift amount.  */
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.relocs[0] */
    {
      if (is_pc && !inst.relocs[0].pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
8425
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifted-register offset form.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.relocs[0] */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.relocs[0].pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
8469
8470/* Write immediate bits [7:0] to the following locations:
8471
8472 |28/24|23 19|18 16|15 4|3 0|
8473 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
8474
8475 This function is used by VMOV/VMVN/VORR/VBIC. */
8476
8477static void
8478neon_write_immbits (unsigned immbits)
8479{
8480 inst.instruction |= immbits & 0xf;
8481 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
8482 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
8483}
8484
/* Invert the low-order SIZE bits of the pair XHI:XLO in place.
   Either pointer may be NULL, in which case that half is ignored.
   SIZE must be 8, 16, 32 or 64.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      lo = ~lo & 0xff;
      break;

    case 16:
      lo = ~lo & 0xffff;
      break;

    case 64:
      hi = ~hi & 0xffffffff;
      /* fall through.  */

    case 32:
      lo = ~lo & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
8521
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D, i.e. every byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
}
8533
/* For an immediate of the form accepted by neon_bits_same_in_bytes
   (each byte all-zeros or all-ones), collapse one bit per byte and
   return 0bABCD: bit K of the result is bit 8*K of IMM.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned squashed = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    squashed |= ((imm >> (byte * 8)) & 0x01) << byte;

  return squashed;
}
8542
/* Compress a single-precision bit pattern holding a quarter-float
   ("abcdefgh" form) down to its 8 significant bits: the sign bit and
   the top 7 bits of exponent+fraction.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned sign = (imm >> 24) & 0x80;
  unsigned body = (imm >> 19) & 0x7f;

  return body | sign;
}
8550
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* cmode 0xF: quarter-float immediate (32-bit float elements only).  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* cmode 0xE with OP=1: 64-bit byte-mask immediate.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise only a value repeated in both halves can drop to a
	 32-bit element size.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* cmodes 0x0/0x2/0x4/0x6: a single byte in one of the four byte
	 positions of a 32-bit element.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* cmodes 0xC/0xD: a byte followed by ones ("shifted ones").  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* A value repeated in both halfwords can retry at size 16.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* cmodes 0x8/0xA: a single byte in either half of a 16-bit
	 element.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* A value repeated in both bytes can retry at size 8.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
8660
8661#if defined BFD_HOST_64_BIT
8662/* Returns TRUE if double precision value V may be cast
8663 to single precision without loss of accuracy. */
8664
8665static bfd_boolean
8666is_double_a_single (bfd_int64_t v)
8667{
8668 int exp = (int)((v >> 52) & 0x7FF);
8669 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
8670
8671 return (exp == 0 || exp == 0x7FF
8672 || (exp >= 1023 - 126 && exp <= 1023 + 127))
8673 && (mantissa & 0x1FFFFFFFl) == 0;
8674}
8675
8676/* Returns a double precision value casted to single precision
8677 (ignoring the least significant bits in exponent and mantissa). */
8678
8679static int
8680double_to_single (bfd_int64_t v)
8681{
8682 int sign = (int) ((v >> 63) & 1l);
8683 int exp = (int) ((v >> 52) & 0x7FF);
8684 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
8685
8686 if (exp == 0x7FF)
8687 exp = 0xFF;
8688 else
8689 {
8690 exp = exp - 1023 + 127;
8691 if (exp >= 0xFF)
8692 {
8693 /* Infinity. */
8694 exp = 0x7F;
8695 mantissa = 0;
8696 }
8697 else if (exp < 0)
8698 {
8699 /* No denormalized numbers. */
8700 exp = 0;
8701 mantissa = 0;
8702 }
8703 }
8704 mantissa >>= 29;
8705 return (sign << 31) | (exp << 23) | mantissa;
8706}
8707#endif /* BFD_HOST_64_BIT */
8708
/* Kind of instruction an "=const" pseudo-load is destined for; used by
   move_or_literal_pool to pick the replacement move encoding.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb ldr rd, =const.  */
  CONST_ARM,	/* ARM ldr rd, =const.  */
  CONST_VEC	/* VFP/Neon vldr rd, =const.  */
};
8715
8716static void do_vfp_nsyn_opcode (const char *);
8717
/* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p   = (t == CONST_ARM);

  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* Only loads may use the "=const" syntax.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op != O_constant
      && inst.relocs[0].exp.X_op != O_symbol
      && inst.relocs[0].exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op == O_constant
      || inst.relocs[0].exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.relocs[0].exp.X_op == O_big)
	{
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.relocs[0].exp.X_add_number == -1)
	    {
	      /* X_add_number of -1 marks a floating-point bignum;
		 convert it back to littlenum words.  */
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

	  /* Assemble the low littlenums into a host integer.  */
#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	    | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.relocs[0].exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* LDR should not use lead in a flag-setting instruction being
		 chosen so we do not check whether movs can be used.  */

	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		  || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		  && inst.operands[i].reg != 13
		  && inst.operands[i].reg != 15)
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      /* Try the bitwise complement, for MVN.  */
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000  /*  MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      /* In case this replacement is being done on Armv8-M
			 Baseline we need to make sure to disable the
			 instruction size check, as otherwise GAS will reject
			 the use of this T32 instruction.  */
		      inst.size_req = 0;
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      /* Try to rewrite a Neon vldr as a vmov/vmvn immediate.  */
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      unsigned immhi = inst.operands[1].regisimm
			       ? inst.operands[1].reg
			       : inst.relocs[0].exp.X_unsigned
				 ? 0
				 : ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  /* Retry with the inverted value, flipping MOV<->MVN.  */
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
				     | (1 << 23)
				     | (cmode << 8)
				     | (op << 5)
				     | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move is possible: fall back to a PC-relative literal-pool load.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.relocs[0].pc_rel = 1;
  inst.relocs[0].type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8967
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      /* "=const" destined for a vector register: try to turn it into
	 a vmov, or fall through to the literal pool.  */
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  /* Base register Rn goes in bits [19:16].  */
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form carries an 8-bit option value, not an offset.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.relocs[0].type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.relocs[0].type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.relocs[0].type > BFD_RELOC_ARM_LDC_SB_G2)
	    && inst.relocs[0].type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      /* Group relocations are preserved; otherwise pick the default
	 coprocessor-offset relocation for the current instruction set.  */
      if (thumb_mode)
	inst.relocs[0].type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.relocs[0].type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
9044
9045/* Functions for instruction encoding, sorted by sub-architecture.
9046 First some generics; their names are taken from the conventional
9047 bit positions for register arguments in ARM format instructions. */
9048
/* Encoder for instructions taking no operands; the opcode-table
   entry already contains the complete encoding.  */
static void
do_noargs (void)
{
}
9053
/* Encode a single register operand into the Rd field, bits [15:12].  */
static void
do_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
}
9059
/* Encode a single register operand into the Rn field, bits [19:16].  */
static void
do_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
}
9065
/* Encode two registers: Rd in bits [15:12], Rm in bits [3:0].  */
static void
do_rd_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
}
9072
/* Encode two registers: Rm in bits [3:0], Rn in bits [19:16].  */
static void
do_rm_rn (void)
{
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 16;
}
9079
/* Encode two registers: Rd in bits [15:12], Rn in bits [19:16].  */
static void
do_rd_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}
9086
/* Encode two registers: Rn in bits [19:16], Rd in bits [15:12].  */
static void
do_rn_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
}
9093
/* Encode the operands of a TT instruction: first register in bits
   [11:8], second in bits [19:16].  */
static void
do_tt (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
}
9100
9101static bfd_boolean
9102check_obsolete (const arm_feature_set *feature, const char *msg)
9103{
9104 if (ARM_CPU_IS_ANY (cpu_variant))
9105 {
9106 as_tsktsk ("%s", msg);
9107 return TRUE;
9108 }
9109 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
9110 {
9111 as_bad ("%s", msg);
9112 return TRUE;
9113 }
9114
9115 return FALSE;
9116}
9117
/* Encode three registers: Rd in bits [15:12], Rm in bits [3:0],
   Rn in bits [19:16].  Also enforces the operand restrictions and
   deprecation rules for SWP/SWPB, which share this encoder.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
9141
9142static void
9143do_rd_rn_rm (void)
9144{
9145 inst.instruction |= inst.operands[0].reg << 12;
9146 inst.instruction |= inst.operands[1].reg << 16;
9147 inst.instruction |= inst.operands[2].reg;
9148}
9149
/* Encode Rm (bits 3:0), Rd (bits 15:12) and Rn (bits 19:16) from
   operands 0, 1 and 2.  Rn is a base register inside [] which must
   not be PC and must carry no (non-zero) offset.  */

static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* Any offset expression parsed into relocs[0] must be a literal
     zero; X_op == O_illegal means no expression was present at all.  */
  constraint (((inst.relocs[0].exp.X_op != O_constant
		&& inst.relocs[0].exp.X_op != O_illegal)
	       || inst.relocs[0].exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9162
9163static void
9164do_imm0 (void)
9165{
9166 inst.instruction |= inst.operands[0].imm;
9167}
9168
9169static void
9170do_rd_cpaddr (void)
9171{
9172 inst.instruction |= inst.operands[0].reg << 12;
9173 encode_arm_cp_address (1, TRUE, TRUE, 0);
9174}
9175
9176/* ARM instructions, in alphabetical order by function name (except
9177 that wrapper functions appear immediately after the function they
9178 wrap). */
9179
9180/* This is a pseudo-op of the form "adr rd, label" to be converted
9181 into a relative address of the form "add rd, pc, #label-.-8". */
9182
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* In ARM state PC reads as the address of this instruction plus 8.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* Interworking: the address of a defined Thumb function must have
     bit 0 set so that a subsequent BX enters Thumb state.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
9201
9202/* This is a pseudo-op of the form "adrl rd, label" to be converted
9203 into a relative address of the form:
9204 add rd, pc, #low(label-.-8)"
9205 add rd, rd, #high(label-.-8)" */
9206
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* This pseudo-op expands to two ARM instructions.  */
  inst.size = INSN_SIZE * 2;
  /* In ARM state PC reads as the address of the first instruction
     plus 8.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* Interworking: the address of a defined Thumb function must have
     bit 0 set so that a subsequent BX enters Thumb state.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
9226
/* Encode a data-processing arithmetic instruction: Rd, Rn and a
   shifter operand.  */

static void
do_arit (void)
{
  /* The Thumb-1 :upper0_7: etc. group relocations are not usable with
     the ARM encoding (see THUMB1_RELOC_ONLY diagnostic).  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,
	      THUMB1_RELOC_ONLY);
  /* Two-operand form: the destination doubles as the first source.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
9239
9240static void
9241do_barrier (void)
9242{
9243 if (inst.operands[0].present)
9244 inst.instruction |= inst.operands[0].imm;
9245 else
9246 inst.instruction |= 0xf;
9247}
9248
9249static void
9250do_bfc (void)
9251{
9252 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
9253 constraint (msb > 32, _("bit-field extends past end of register"));
9254 /* The instruction encoding stores the LSB and MSB,
9255 not the LSB and width. */
9256 inst.instruction |= inst.operands[0].reg << 12;
9257 inst.instruction |= inst.operands[1].imm << 7;
9258 inst.instruction |= (msb - 1) << 16;
9259}
9260
/* BFI Rd, Rm, #lsb, #width (argument parse).  */

static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
9280
9281static void
9282do_bfx (void)
9283{
9284 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
9285 _("bit-field extends past end of register"));
9286 inst.instruction |= inst.operands[0].reg << 12;
9287 inst.instruction |= inst.operands[1].reg;
9288 inst.instruction |= inst.operands[2].imm << 7;
9289 inst.instruction |= (inst.operands[3].imm - 1) << 16;
9290}
9291
9292/* ARM V5 breakpoint instruction (argument parse)
9293 BKPT <16 bit unsigned immediate>
9294 Instruction is not conditional.
9295 The bit pattern given in insns[] has the COND_ALWAYS condition,
9296 and it is an error if the caller tried to override that. */
9297
9298static void
9299do_bkpt (void)
9300{
9301 /* Top 12 of 16 bits to bits 19:8. */
9302 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
9303
9304 /* Bottom 4 of 16 bits to bits 3:0. */
9305 inst.instruction |= inst.operands[0].imm & 0xf;
9306}
9307
/* Set up relocs[0] for a branch instruction.  DEFAULT_RELOC is used
   unless the branch target carried an explicit (plt) or (tlscall)
   suffix, which forces the corresponding relocation instead.  */

static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      /* (tlscall) has distinct ARM and Thumb relocation types.  */
      inst.relocs[0].type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.relocs[0].type = (bfd_reloc_code_real_type) default_reloc;
  inst.relocs[0].pc_rel = 1;
}
9324
/* Encode a B instruction.  EABI v4 and later use a relocation that
   allows the linker to handle interworking; earlier objects use the
   plain PC-relative branch relocation.  */

static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
9335
/* Encode a BL instruction.  For EABI v4 and later, an unconditional
   BL gets the CALL relocation while a conditional BL gets the JUMP
   relocation; earlier objects use the plain PC-relative branch
   relocation.  */

static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
9351
9352/* ARM V5 branch-link-exchange instruction (argument parse)
9353 BLX <target_addr> ie BLX(1)
9354 BLX{<condition>} <Rm> ie BLX(2)
9355 Unfortunately, there are two different opcodes for this mnemonic.
9356 So, the insns[].value is not used, and the code here zaps values
9357 into inst.instruction.
9358 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
9359
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* Replace the table opcode with the unconditional BLX(1) form.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
9383
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  /* A non-empty selected_object_arch (presumably set by a directive
     such as .object_arch — confirm) can also force the relocation.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch)
      && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* Non-ELF objects, and ELF objects before EABI v4, never get it.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.relocs[0].type = BFD_RELOC_ARM_V4BX;
}
9408
9409
9410/* ARM v5TEJ. Jump to Jazelle code. */
9411
9412static void
9413do_bxj (void)
9414{
9415 if (inst.operands[0].reg == REG_PC)
9416 as_tsktsk (_("use of r15 in bxj is not really useful"));
9417
9418 inst.instruction |= inst.operands[0].reg;
9419}
9420
9421/* Co-processor data operation:
9422 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
9423 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
9424static void
9425do_cdp (void)
9426{
9427 inst.instruction |= inst.operands[0].reg << 8;
9428 inst.instruction |= inst.operands[1].imm << 20;
9429 inst.instruction |= inst.operands[2].reg << 12;
9430 inst.instruction |= inst.operands[3].reg << 16;
9431 inst.instruction |= inst.operands[4].reg;
9432 inst.instruction |= inst.operands[5].imm << 5;
9433}
9434
9435static void
9436do_cmp (void)
9437{
9438 inst.instruction |= inst.operands[0].reg << 16;
9439 encode_arm_shifter_operand (1);
9440}
9441
9442/* Transfer between coprocessor and ARM registers.
9443 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
9444 MRC2
9445 MCR{cond}
9446 MCR2
9447
9448 No special properties. */
9449
/* Description of a coprocessor register whose direct access is
   deprecated or obsolete on some architectures.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opc1 field of the access instruction.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opc2 field.  */
  arm_feature_set deprecated;	/* Architectures where access is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where access is obsolete.
				   NOTE(review): not consulted by do_co_reg
				   in this part of the file — verify use.  */
  const char *dep_msg;		/* Diagnostic for deprecated access.  */
  const char *obs_msg;		/* Diagnostic for obsolete access.  */
};

#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
9490
/* Encode MCR/MCR2/MRC/MRC2.  Operands: 0 = coprocessor number,
   1 = opc1, 2 = Rt, 3 = CRn, 4 = CRm, 5 = opc2.  Also warns about
   accesses to registers listed in deprecated_coproc_regs[].  */

static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      /* 0xee000010 / 0xfe000010 are the MCR / MCR2 opcode templates.  */
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2 */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2: SP is only rejected before ARMv8.  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn when this access matches a deprecated-register table entry
     (unless assembling for the "any" CPU).  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  /* coproc -> 11:8, opc1 -> 23:21, Rt -> 15:12, CRn -> 19:16,
     CRm -> 3:0, opc2 -> 7:5.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
9540
9541/* Transfer between coprocessor register and pair of ARM registers.
9542 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
9543 MCRR2
9544 MRRC{cond}
9545 MRRC2
9546
9547 Two XScale instructions are special cases of these:
9548
9549 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
9550 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
9551
9552 Result unpredictable if Rd or Rn is R15. */
9553
/* Encode MCRR/MCRR2/MRRC/MRRC2.  Operands: 0 = coprocessor number,
   1 = opcode, 2 = Rt, 3 = Rt2, 4 = CRm.  */

static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
      /* If Rd == Rn, error that the operation is
	 unpredictable (example MRRC p3,#1,r1,r1,c4).  */
      constraint (Rd == Rn, BAD_OVERLAP);
    }

  /* coproc -> 11:8, opcode -> 7:4, Rt -> 15:12, Rt2 -> 19:16,
     CRm -> 3:0.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
9587
9588static void
9589do_cpsi (void)
9590{
9591 inst.instruction |= inst.operands[0].imm << 6;
9592 if (inst.operands[1].present)
9593 {
9594 inst.instruction |= CPSI_MMOD;
9595 inst.instruction |= inst.operands[1].imm;
9596 }
9597}
9598
9599static void
9600do_dbg (void)
9601{
9602 inst.instruction |= inst.operands[0].imm;
9603}
9604
9605static void
9606do_div (void)
9607{
9608 unsigned Rd, Rn, Rm;
9609
9610 Rd = inst.operands[0].reg;
9611 Rn = (inst.operands[1].present
9612 ? inst.operands[1].reg : Rd);
9613 Rm = inst.operands[2].reg;
9614
9615 constraint ((Rd == REG_PC), BAD_PC);
9616 constraint ((Rn == REG_PC), BAD_PC);
9617 constraint ((Rm == REG_PC), BAD_PC);
9618
9619 inst.instruction |= Rd << 16;
9620 inst.instruction |= Rn << 0;
9621 inst.instruction |= Rm << 8;
9622}
9623
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* The pseudo-instruction emits no bytes in ARM mode.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_pred_insn_type (IT_INSN);
      /* Record the then/else mask (low 4 opcode bits, with bit 4 set
	 as a marker) and the base condition, so that the following
	 instructions can be validated against this IT block.  */
      now_pred.mask = (inst.instruction & 0xf) | 0x10;
      now_pred.cc = inst.operands[0].imm;
    }
}
9640
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.

   RANGE is a 16-bit register mask.  Guard against RANGE having no
   bits set: ffs() then returns 0, and the old code went on to
   evaluate 1 << -1, which is undefined behaviour.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  if (i < 0 || i > 15)
    return -1;
  return range == (1 << i) ? i : -1;
}
9649
/* Shared encoder for LDM/STM and PUSH/POP.  Operand 0 is the base
   register, operand 1 the register list.  FROM_PUSH_POP_MNEM selects
   the single-register A2 PUSH/POP encoding when the list contains
   exactly one register.  */

static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A writeback marker on the register-list operand selects the
     LDM type 2/3 (user-bank / exception-return) form.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback. */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2. */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types. */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM. */
	{
	  /* Not allowed for type 2. */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list. */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding. */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      if (is_push && one_reg == 13 /* SP */)
	/* PR 22483: The A2 encoding cannot be used when
	   pushing the stack pointer as this is UNPREDICTABLE. */
	return;

      /* Keep only the condition bits and rebuild the opcode around
	 the single register.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
9705
/* LDM/STM proper (as opposed to PUSH/POP): never eligible for the
   single-register A2 encoding.  */

static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
9711
9712/* ARMv5TE load-consecutive (argument parse)
9713 Mode is like LDRH.
9714
9715 LDRccD R, mode
9716 STRccD R, mode. */
9717
/* Encode LDRD/STRD.  Operand 0 is the first (even) transfer register,
   operand 1 the optional second register (must be operand 0 + 1), and
   operand 2 the address.  */

static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* r14 would make the pair r14:r15, i.e. include PC.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
9753
/* Encode LDREX Rt, [Rn].  The address must be a bare base register:
   no offset, writeback, shift or register index.  */

static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* NOTE(review): PC was already rejected by the address-mode check
     above, so this constraint can never fire; kept as belt-and-braces.  */
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* No fixup is needed: discard the parsed (zero) offset expression.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
9785
/* Encode LDREXD Rt, Rt2, [Rn].  Rt must be even, Rt2 (if given) must
   be Rt + 1, and Rt may not be r14 (the pair would include PC).
   Operand 2 is the base register.  */

static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9801
/* In both ARM and thumb state 'ldr pc, #imm' with an immediate
   which is not a multiple of four is UNPREDICTABLE.  */
static void
check_ldr_r15_aligned (void)
{
  /* Only the immediate-offset forms can be checked at assembly time;
     register-offset forms (immisreg) are exempt.  */
  constraint (!(inst.operands[1].immisreg)
	      && (inst.operands[0].reg == REG_PC
		  && inst.operands[1].reg == REG_PC
		  && (inst.relocs[0].exp.X_add_number & 0x3)),
	      _("ldr to register 15 must be 4-byte aligned"));
}
9813
/* Encode the LDR/STR word and byte forms.  Operand 0 is Rt, operand 1
   the address (or an "=immediate" pseudo-load).  */

static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* "ldr Rt, =imm" may be rewritten entirely as a mov or a
     literal-pool load; if so there is nothing left to encode here.  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
9824
/* Encode LDRT/STRT (user-mode access, addressing mode 2).  */

static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
9843
9844/* Halfword and signed-byte load/store operations. */
9845
/* Encode the halfword / signed-byte loads and stores (addressing
   mode 3).  Rt may not be PC.  */

static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  /* "ldr Rt, =imm" may be rewritten entirely as a mov or a
     literal-pool load; if so there is nothing left to encode here.  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
9856
/* Encode LDRHT/LDRSBT/etc. (user-mode access, addressing mode 3).  */

static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9875
9876/* Co-processor register load/store.
9877 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9878static void
9879do_lstc (void)
9880{
9881 inst.instruction |= inst.operands[0].reg << 8;
9882 inst.instruction |= inst.operands[1].reg << 12;
9883 encode_arm_cp_address (2, TRUE, TRUE, 0);
9884}
9885
9886static void
9887do_mlas (void)
9888{
9889 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9890 if (inst.operands[0].reg == inst.operands[1].reg
9891 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
9892 && !(inst.instruction & 0x00400000))
9893 as_tsktsk (_("Rd and Rm should be different in mla"));
9894
9895 inst.instruction |= inst.operands[0].reg << 16;
9896 inst.instruction |= inst.operands[1].reg;
9897 inst.instruction |= inst.operands[2].reg << 8;
9898 inst.instruction |= inst.operands[3].reg << 12;
9899}
9900
/* Encode a MOV-family data-processing instruction: Rd plus a shifter
   operand.  */

static void
do_mov (void)
{
  /* The Thumb-1 :upper0_7: etc. group relocations are not usable with
     the ARM encoding (see THUMB1_RELOC_ONLY diagnostic).  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9910
/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.  */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 22 distinguishes MOVT ("top") from MOVW.  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.relocs[0].type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.relocs[0].type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  /* With a :lower16:/:upper16: relocation the immediate is inserted
     by the fixup machinery later; otherwise encode it now.  */
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      imm = inst.relocs[0].exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
9932
/* Handle an MRS whose operands are really VFP ones (non-unified
   syntax): a vector destination means "mrs APSR_nzcv, fpscr", which
   is re-dispatched as FMSTAT; a vector source is re-dispatched as
   FMRX.  Returns SUCCESS if handled here, FAIL to fall back to the
   core MRS encoding.  */

static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* FMSTAT takes no operands, so clear ours before re-encoding.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
9951
9952static int
9953do_vfp_nsyn_msr (void)
9954{
9955 if (inst.operands[0].isvec)
9956 do_vfp_nsyn_opcode ("fmxr");
9957 else
9958 return FAIL;
9959
9960 return SUCCESS;
9961}
9962
9963static void
9964do_vmrs (void)
9965{
9966 unsigned Rt = inst.operands[0].reg;
9967
9968 if (thumb_mode && Rt == REG_SP)
9969 {
9970 inst.error = BAD_SP;
9971 return;
9972 }
9973
9974 switch (inst.operands[1].reg)
9975 {
9976 /* MVFR2 is only valid for Armv8-A. */
9977 case 5:
9978 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
9979 _(BAD_FPU));
9980 break;
9981
9982 /* Check for new Armv8.1-M Mainline changes to <spec_reg>. */
9983 case 1: /* fpscr. */
9984 constraint (!(ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
9985 || ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
9986 _(BAD_FPU));
9987 break;
9988
9989 case 14: /* fpcxt_ns. */
9990 case 15: /* fpcxt_s. */
9991 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main),
9992 _("selected processor does not support instruction"));
9993 break;
9994
9995 case 2: /* fpscr_nzcvqc. */
9996 case 12: /* vpr. */
9997 case 13: /* p0. */
9998 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main)
9999 || (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
10000 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
10001 _("selected processor does not support instruction"));
10002 if (inst.operands[0].reg != 2
10003 && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
10004 as_warn (_("accessing MVE system register without MVE is UNPREDICTABLE"));
10005 break;
10006
10007 default:
10008 break;
10009 }
10010
10011 /* APSR_ sets isvec. All other refs to PC are illegal. */
10012 if (!inst.operands[0].isvec && Rt == REG_PC)
10013 {
10014 inst.error = BAD_PC;
10015 return;
10016 }
10017
10018 /* If we get through parsing the register name, we just insert the number
10019 generated into the instruction without further validation. */
10020 inst.instruction |= (inst.operands[1].reg << 16);
10021 inst.instruction |= (Rt << 12);
10022}
10023
/* Encode VMSR <spec_reg>, Rt.  Operand 0 is the VFP/MVE system
   register number, operand 1 the core source register Rt.  */

static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  switch (inst.operands[0].reg)
    {
    /* MVFR2 is only valid for Armv8-A.  */
    case 5:
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));
      break;

    /* Check for new Armv8.1-M Mainline changes to <spec_reg>.  */
    case 1: /* fpscr.  */
      constraint (!(ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		    || ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _(BAD_FPU));
      break;

    case 14: /* fpcxt_ns.  */
    case 15: /* fpcxt_s.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main),
		  _("selected processor does not support instruction"));
      break;

    case 2: /* fpscr_nzcvqc.  */
    case 12: /* vpr.  */
    case 13: /* p0.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main)
		  || (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _("selected processor does not support instruction"));
      /* Operand 0 is the system register here, so this warns for
	 VPR/P0 (but not FPSCR_nzcvqc, which is 2) without MVE.  */
      if (inst.operands[0].reg != 2
	  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	as_warn (_("accessing MVE system register without MVE is UNPREDICTABLE"));
      break;

    default:
      break;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
10079
/* Encode MRS Rd, <psr or banked register>.  */

static void
do_mrs (void)
{
  unsigned br;

  /* The VFP forms (FMSTAT/FMRX via non-unified syntax) are handled
     separately.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      br = inst.operands[1].reg;
      /* NOTE(review): banked-register values appear to carry flag bit
	 0x200, and plain PSR values the 0xf0000 marker — confirm
	 against the PSR operand parser.  */
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
10108
/* Two possible forms:
      "{C|S}PSR_<field>, Rm",
      "{C|S}PSR_f, #expression".  */

static void
do_msr (void)
{
  /* The VFP form (FMXR via non-unified syntax) is handled separately.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* The parser encoded the PSR field mask into operand 0's imm.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: the value is inserted by the fixup machinery
	 via an ARM-immediate relocation.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
      inst.relocs[0].pc_rel = 0;
    }
}
10129
10130static void
10131do_mul (void)
10132{
10133 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
10134
10135 if (!inst.operands[2].present)
10136 inst.operands[2].reg = inst.operands[0].reg;
10137 inst.instruction |= inst.operands[0].reg << 16;
10138 inst.instruction |= inst.operands[1].reg;
10139 inst.instruction |= inst.operands[2].reg << 8;
10140
10141 if (inst.operands[0].reg == inst.operands[1].reg
10142 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
10143 as_tsktsk (_("Rd and Rm should be different in mul"));
10144}
10145
/* Long Multiply Parser
   UMULL RdLo, RdHi, Rm, Rs
   SMULL RdLo, RdHi, Rm, Rs
   UMLAL RdLo, RdHi, Rm, Rs
   SMLAL RdLo, RdHi, Rm, Rs.  */

static void
do_mull (void)
{
  /* RdLo: bits 12-15, RdHi: bits 16-19, Rm: bits 0-3, Rs: bits 8-11.
     The register restrictions below are warnings only, so legacy code
     still assembles.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
10170
static void
do_nop (void)
{
  /* NOP {#<imm>}.  On a pre-v6K core with no operand, the opcode
     table's default encoding is left untouched; otherwise the
     instruction is rewritten as the architectural hint form, keeping
     only the condition field.  */
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
10184
10185/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
10186 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
10187 Condition defaults to COND_ALWAYS.
10188 Error if Rd, Rn or Rm are R15. */
10189
10190static void
10191do_pkhbt (void)
10192{
10193 inst.instruction |= inst.operands[0].reg << 12;
10194 inst.instruction |= inst.operands[1].reg << 16;
10195 inst.instruction |= inst.operands[2].reg;
10196 if (inst.operands[3].present)
10197 encode_arm_shift (3);
10198}
10199
/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  */
      /* Note the swapped field assignment: operand 1 goes to the Rm
	 field (bits 0-3) and operand 2 to the Rn field (bits 16-19),
	 after clearing the PKHTB-specific bits.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      /* Normal PKHTB encoding: Rd, Rn, Rm plus the parsed shift.  */
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
10222
/* ARMv5TE: Preload-Cache
   MP Extensions: Preload for write

   PLD(W) <addr_mode>

   Syntactically, like LDR with B=1, W=0, L=1.  */

static void
do_pld (void)
{
  /* Only pre-indexed "[Rn, ...]" addressing is accepted: no
     post-index, no writeback, no unindexed form.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}

/* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  /* Same addressing restrictions as PLD...  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* ...but the PLI encoding keeps the P bit clear, so undo the flag
     that encode_arm_addr_mode_2 set.  */
  inst.instruction &= ~PRE_INDEX;
}
10259
static void
do_push_pop (void)
{
  /* PUSH/POP <reglist> is assembled as an LDM/STM on SP with
     writeback.  The '^' marker would have been parsed into the
     reglist operand's writeback flag; reject it here.  */
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Rewrite the single reglist operand into the two-operand
     "SP!, <reglist>" shape that encode_ldmstm expects.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
10272
10273/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
10274 word at the specified address and the following word
10275 respectively.
10276 Unconditionally executed.
10277 Error if Rn is R15. */
10278
10279static void
10280do_rfe (void)
10281{
10282 inst.instruction |= inst.operands[0].reg << 16;
10283 if (inst.operands[0].writeback)
10284 inst.instruction |= WRITE_BACK;
10285}
10286
10287/* ARM V6 ssat (argument parse). */
10288
10289static void
10290do_ssat (void)
10291{
10292 inst.instruction |= inst.operands[0].reg << 12;
10293 inst.instruction |= (inst.operands[1].imm - 1) << 16;
10294 inst.instruction |= inst.operands[2].reg;
10295
10296 if (inst.operands[3].present)
10297 encode_arm_shift (3);
10298}
10299
10300/* ARM V6 usat (argument parse). */
10301
10302static void
10303do_usat (void)
10304{
10305 inst.instruction |= inst.operands[0].reg << 12;
10306 inst.instruction |= inst.operands[1].imm << 16;
10307 inst.instruction |= inst.operands[2].reg;
10308
10309 if (inst.operands[3].present)
10310 encode_arm_shift (3);
10311}
10312
10313/* ARM V6 ssat16 (argument parse). */
10314
10315static void
10316do_ssat16 (void)
10317{
10318 inst.instruction |= inst.operands[0].reg << 12;
10319 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
10320 inst.instruction |= inst.operands[2].reg;
10321}
10322
10323static void
10324do_usat16 (void)
10325{
10326 inst.instruction |= inst.operands[0].reg << 12;
10327 inst.instruction |= inst.operands[1].imm << 16;
10328 inst.instruction |= inst.operands[2].reg;
10329}
10330
/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   preserving the other bits.

   setend <endian_specifier>, where <endian_specifier> is either
   BE or LE.  */

static void
do_setend (void)
{
  /* SETEND is deprecated on ARMv8: warn (don't error) when
     deprecation diagnostics are enabled.  */
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* A nonzero endian specifier sets bit 9 of the encoding.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
10347
static void
do_shift (void)
{
  /* Shift mnemonics: <shift> Rd, {Rm,} (Rs | #imm).  In the
     two-operand form Rm is omitted and defaults to Rd.  */
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)	/* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate amount: encoded later through the fixup machinery.  */
    inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
}
10368
static void
do_smc (void)
{
  /* SMC #<imm>: range-check the constant here; the reloc inserts it.  */
  unsigned int value = inst.relocs[0].exp.X_add_number;
  constraint (value > 0xf, _("immediate too large (bigger than 0xF)"));

  inst.relocs[0].type = BFD_RELOC_ARM_SMC;
  inst.relocs[0].pc_rel = 0;
}

static void
do_hvc (void)
{
  /* HVC #<imm>: the immediate is handled entirely by the reloc.  */
  inst.relocs[0].type = BFD_RELOC_ARM_HVC;
  inst.relocs[0].pc_rel = 0;
}

static void
do_swi (void)
{
  /* SWI/SVC #<imm>: likewise deferred to the reloc.  */
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
  inst.relocs[0].pc_rel = 0;
}
10392
static void
do_setpan (void)
{
  /* SETPAN #<imm> (ARM encoding): the state bit goes in bit 9.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}

static void
do_t_setpan (void)
{
  /* SETPAN #<imm> (Thumb encoding): the state bit goes in bit 3.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
10410
10411/* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
10412 SMLAxy{cond} Rd,Rm,Rs,Rn
10413 SMLAWy{cond} Rd,Rm,Rs,Rn
10414 Error if any register is R15. */
10415
10416static void
10417do_smla (void)
10418{
10419 inst.instruction |= inst.operands[0].reg << 16;
10420 inst.instruction |= inst.operands[1].reg;
10421 inst.instruction |= inst.operands[2].reg << 8;
10422 inst.instruction |= inst.operands[3].reg << 12;
10423}
10424
10425/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
10426 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
10427 Error if any register is R15.
10428 Warning if Rdlo == Rdhi. */
10429
10430static void
10431do_smlal (void)
10432{
10433 inst.instruction |= inst.operands[0].reg << 12;
10434 inst.instruction |= inst.operands[1].reg << 16;
10435 inst.instruction |= inst.operands[2].reg;
10436 inst.instruction |= inst.operands[3].reg << 8;
10437
10438 if (inst.operands[0].reg == inst.operands[1].reg)
10439 as_tsktsk (_("rdhi and rdlo must be different"));
10440}
10441
10442/* ARM V5E (El Segundo) signed-multiply (argument parse)
10443 SMULxy{cond} Rd,Rm,Rs
10444 Error if any register is R15. */
10445
10446static void
10447do_smul (void)
10448{
10449 inst.instruction |= inst.operands[0].reg << 16;
10450 inst.instruction |= inst.operands[1].reg;
10451 inst.instruction |= inst.operands[2].reg << 8;
10452}
10453
/* ARM V6 srs (argument parse).  The variable fields in the encoding are
   the same for both ARM and Thumb-2.  */

static void
do_srs (void)
{
  int reg;

  /* The base register is optional and defaults to SP; when written
     explicitly it may only be r13.  */
  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;
  /* Depending on which syntax was parsed, '!' may have landed on
     either operand.  */
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
10475
/* ARM V6 strex (argument parse).  */

static void
do_strex (void)
{
  /* Only the bare "[Rn]" form is legal: no post-index, writeback,
     register offset, shift or negative index, and no PC base.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register must not overlap the value or base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}

static void
do_t_strexbh (void)
{
  /* Thumb STREXB/STREXH: same addressing restrictions as do_strex,
     minus the explicit PC-base check.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}

static void
do_strexd (void)
{
  /* STREXD Rd, Rt, Rt2, [Rn]: Rt must be even and Rt2 (when written
     out) must be Rt + 1.  */
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status register must not overlap the register pair or the
     base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}

/* ARM V8 STRL.  */
static void
do_stlex (void)
{
  /* Status register must be distinct from both value and base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}

static void
do_t_stlex (void)
{
  /* Thumb variant: same overlap rule, different field order.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
10557
10558/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
10559 extends it to 32-bits, and adds the result to a value in another
10560 register. You can specify a rotation by 0, 8, 16, or 24 bits
10561 before extracting the 16-bit value.
10562 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
10563 Condition defaults to COND_ALWAYS.
10564 Error if any register uses R15. */
10565
10566static void
10567do_sxtah (void)
10568{
10569 inst.instruction |= inst.operands[0].reg << 12;
10570 inst.instruction |= inst.operands[1].reg << 16;
10571 inst.instruction |= inst.operands[2].reg;
10572 inst.instruction |= inst.operands[3].imm << 10;
10573}
10574
10575/* ARM V6 SXTH.
10576
10577 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
10578 Condition defaults to COND_ALWAYS.
10579 Error if any register uses R15. */
10580
10581static void
10582do_sxth (void)
10583{
10584 inst.instruction |= inst.operands[0].reg << 12;
10585 inst.instruction |= inst.operands[1].reg;
10586 inst.instruction |= inst.operands[2].imm << 10;
10587}
10588\f
/* VFP instructions.  In a logical order: SP variant first, monad
   before dyad, arithmetic then move then load/store.  */

static void
do_vfp_sp_monadic (void)
{
  /* Sd, Sm; needs at least single-precision VFP (v1xd) or MVE.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_dyadic (void)
{
  /* Sd, Sn, Sm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_compare_z (void)
{
  /* Compare-with-zero form: only Sd is encoded.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}

static void
do_vfp_dp_sp_cvt (void)
{
  /* Conversion with a double-precision destination and a
     single-precision source.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_dp_cvt (void)
{
  /* Conversion with a single-precision destination and a
     double-precision source.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_reg_from_sp (void)
{
  /* Move from an SP register (Sn field) to a core register
     (bits 12-15).  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}

static void
do_vfp_reg2_from_sp2 (void)
{
  /* Move a pair of consecutive SP registers to two core registers;
     the parser stores the pair count in operand 2's imm.  */
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_from_reg (void)
{
  /* Move from a core register to an SP register.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}

static void
do_vfp_sp2_from_reg2 (void)
{
  /* Move two core registers to a pair of consecutive SP registers.  */
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

static void
do_vfp_sp_ldst (void)
{
  /* Single-register load/store: Sd plus a coprocessor address.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}

static void
do_vfp_dp_ldst (void)
{
  /* As above, with a double-precision register.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
10686
10687
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  /* Multiple load/store of single-precision registers.  Only the
     IA form may appear without base-register writeback.  */
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  /* Register count goes directly in the offset field.  */
  inst.instruction |= inst.operands[1].imm;
}

static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  /* Double-precision variant; the IA and IAX forms may omit
     writeback.  */
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* The offset field counts words: two per D register, plus one more
     for the X ("extended") forms.  */
  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
10721
static void
do_vfp_sp_ldstmia (void)
{
  /* Increment-after multiple load/store, single precision.  */
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  /* Decrement-before multiple load/store, single precision.  */
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  /* Increment-after multiple load/store, double precision.  */
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  /* Decrement-before multiple load/store, double precision.  */
  vfp_dp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_xp_ldstmia (void)
{
  /* Increment-after form of the X ("extended") variant.  */
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  /* Decrement-before form of the X variant.  */
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
10757
static void
do_vfp_dp_rd_rm (void)
{
  /* Dd, Dm; needs VFPv1 or MVE.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rn_rd (void)
{
  /* Operand 0 goes to the Dn field, operand 1 to Dd.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

static void
do_vfp_dp_rd_rn_rm (void)
{
  /* Three-operand double-precision form; needs VFPv2 or MVE.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rm_rd_rn (void)
{
  /* As do_vfp_dp_rd_rn_rm but with operands in Dm, Dd, Dn order.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}

/* VFPv3 instructions.  */
static void
do_vfp_sp_const (void)
{
  /* Single-precision immediate form: the 8-bit encoded constant is
     split, high nibble to bits 16-19, low nibble to bits 0-3.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

static void
do_vfp_dp_const (void)
{
  /* Double-precision immediate form; same split as the SP variant.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

static void
vfp_conv (int srcsize)
{
  /* Fixed-point conversion helper: operand 1 holds the fraction-bit
     count; what is actually encoded is srcsize - imm, split into a
     low bit (bit 5) and the remaining bits (bits 0-3).  */
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}

static void
do_vfp_sp_conv_16 (void)
{
  /* Single precision <-> 16-bit fixed point.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  /* Double precision <-> 16-bit fixed point.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  /* Single precision <-> 32-bit fixed point.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  /* Double precision <-> 32-bit fixed point.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
10881\f
10882/* FPA instructions. Also in a logical order. */
10883
10884static void
10885do_fpa_cmp (void)
10886{
10887 inst.instruction |= inst.operands[0].reg << 16;
10888 inst.instruction |= inst.operands[1].reg;
10889}
10890
static void
do_fpa_ldmstm (void)
{
  /* FPA multiple load/store: Fd in bits 12-15; the register count
     (1-4) is encoded in the CP_T_X/CP_T_Y bits, with 4 encoded as
     both bits clear.  */
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Synthesise an offset of 12 bytes per transferred register.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.relocs[0].exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.relocs[0].exp.X_add_number = -inst.relocs[0].exp.X_add_number;

      /* Descending-stack writeback is emulated with post-indexing.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
10929\f
/* iWMMXt instructions: strictly in alphabetical order.  */

static void
do_iwmmxt_tandorc (void)
{
  /* These insns accept only r15 as their first operand.  */
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}

static void
do_iwmmxt_textrc (void)
{
  /* Rd in bits 12-15, immediate in the low bits.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}

static void
do_iwmmxt_textrm (void)
{
  /* Rd in bits 12-15, wRn in bits 16-19, immediate in the low bits.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}

static void
do_iwmmxt_tinsr (void)
{
  /* wRd in bits 16-19, Rn in bits 12-15, immediate in the low bits.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}

static void
do_iwmmxt_tmia (void)
{
  /* Accumulator at bit 5, Rm in bits 0-3, Rs in bits 12-15.  */
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
10968
10969static void
10970do_iwmmxt_waligni (void)
10971{
10972 inst.instruction |= inst.operands[0].reg << 12;
10973 inst.instruction |= inst.operands[1].reg << 16;
10974 inst.instruction |= inst.operands[2].reg;
10975 inst.instruction |= inst.operands[3].imm << 20;
10976}
10977
10978static void
10979do_iwmmxt_wmerge (void)
10980{
10981 inst.instruction |= inst.operands[0].reg << 12;
10982 inst.instruction |= inst.operands[1].reg << 16;
10983 inst.instruction |= inst.operands[2].reg;
10984 inst.instruction |= inst.operands[3].imm << 21;
10985}
10986
10987static void
10988do_iwmmxt_wmov (void)
10989{
10990 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10991 inst.instruction |= inst.operands[0].reg << 12;
10992 inst.instruction |= inst.operands[1].reg << 16;
10993 inst.instruction |= inst.operands[1].reg;
10994}
10995
static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  /* Pick the scaled (_S2) coprocessor-offset reloc that matches the
     current instruction set.  */
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}

static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register transfers must be unconditional; they use
	 the 0xf condition field.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}

static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* iWMMXt2 adds a register-offset form which needs a bespoke
     encoding rather than the generic coprocessor address.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
11044
static void
do_iwmmxt_wshufh (void)
{
  /* wRd, wRn plus an 8-bit immediate split across the encoding:
     high nibble to bits 20-23, low nibble to bits 0-3.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}

static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}
11062
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  /* Shift insns taking either a register (any iWMMXt) or a 5-bit
     immediate (iWMMXt2 only).  A #0 immediate is not directly
     encodable and is rewritten as an equivalent instruction below.  */
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Dispatch on the size/opcode field in bits 20-23.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
11112\f
11113/* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
11114 operations first, then control, shift, and load/store. */
11115
11116/* Insns like "foo X,Y,Z". */
11117
11118static void
11119do_mav_triple (void)
11120{
11121 inst.instruction |= inst.operands[0].reg << 16;
11122 inst.instruction |= inst.operands[1].reg;
11123 inst.instruction |= inst.operands[2].reg << 12;
11124}
11125
11126/* Insns like "foo W,X,Y,Z".
11127 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
11128
11129static void
11130do_mav_quad (void)
11131{
11132 inst.instruction |= inst.operands[0].reg << 5;
11133 inst.instruction |= inst.operands[1].reg << 12;
11134 inst.instruction |= inst.operands[2].reg << 16;
11135 inst.instruction |= inst.operands[3].reg;
11136}
11137
11138/* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
11139static void
11140do_mav_dspsc (void)
11141{
11142 inst.instruction |= inst.operands[1].reg << 12;
11143}
11144
/* Maverick shift immediate instructions.
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */

static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  /* Destination in bits 12-15, source in bits 16-19.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.  */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}
11164\f
11165/* XScale instructions. Also sorted arithmetic before move. */
11166
11167/* Xscale multiply-accumulate (argument parse)
11168 MIAcc acc0,Rm,Rs
11169 MIAPHcc acc0,Rm,Rs
11170 MIAxycc acc0,Rm,Rs. */
11171
11172static void
11173do_xsc_mia (void)
11174{
11175 inst.instruction |= inst.operands[1].reg;
11176 inst.instruction |= inst.operands[2].reg << 12;
11177}
11178
11179/* Xscale move-accumulator-register (argument parse)
11180
11181 MARcc acc0,RdLo,RdHi. */
11182
11183static void
11184do_xsc_mar (void)
11185{
11186 inst.instruction |= inst.operands[1].reg << 12;
11187 inst.instruction |= inst.operands[2].reg << 16;
11188}
11189
11190/* Xscale move-register-accumulator (argument parse)
11191
11192 MRAcc RdLo,RdHi,acc0. */
11193
11194static void
11195do_xsc_mra (void)
11196{
11197 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
11198 inst.instruction |= inst.operands[0].reg << 12;
11199 inst.instruction |= inst.operands[1].reg << 16;
11200}
11201\f
/* Encoding functions relevant only to Thumb.  */

/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.  */

static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  /* Thumb32 shifted operands only allow an immediate shift amount.  */
  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      /* Amounts up to 32 are accepted, except for LSL and ROR where
	 32 has no encoding.  */
      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* A zero amount is canonicalised to LSL #0; LSR/ASR #32 are
	 encoded with an amount field of 0.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      /* The amount is split across the imm3 (bits 12-14) and imm2
	 (bits 6-7) fields.  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
11238
11239
11240/* inst.operands[i] was set up by parse_address. Encode it into a
11241 Thumb32 format load or store instruction. Reject forms that cannot
11242 be used with such instructions. If is_t is true, reject forms that
11243 cannot be used with a T instruction; if is_d is true, reject forms
11244 that cannot be used with a D instruction. If it is a store insn,
11245 reject PC in Rn. */
11246
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  /* Literal-pool loads (=N syntax) must have been transformed away
     before reaching here.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register always goes in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #imm}].  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.relocs[0].exp.X_op != O_constant,
		      _("expression too complex"));
	  /* Only LSL #0..#3 is encodable (2-bit shift field).  */
	  constraint (inst.relocs[0].exp.X_add_number < 0
		      || inst.relocs[0].exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
	}
      /* The shift amount has been consumed; no fixup needed.  */
      inst.relocs[0].type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Immediate pre-indexed (or plain offset) form.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      /* PC-relative addressing is only valid for loads.  */
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  /* Dual insns: bit 24 selects pre-indexed, bit 21 writeback.  */
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  /* Single insns: bits 10-11 select the offset form, bit 8
	     writeback.  */
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      /* Offset value is resolved later by md_apply_fix.  */
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
11318
/* Table of Thumb instructions which exist in 16- and/or 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.
   Each entry is X(mnemonic-tag, 16-bit opcode, 32-bit opcode); the
   X macro is redefined below to extract each column in turn.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bf,    0000, f040e001),			\
  X(_bfcsel,0000, f000e001),			\
  X(_bfx,   0000, f060e001),			\
  X(_bfl,   0000, f000c001),			\
  X(_bflx,  0000, f070e001),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cinc,  0000, ea509000),			\
  X(_cinv,  0000, ea50a000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cneg,  0000, ea50b000),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_csel,  0000, ea508000),			\
  X(_cset,  0000, ea5f900f),			\
  X(_csetm, 0000, ea5fa00f),			\
  X(_csinc, 0000, ea509000),			\
  X(_csinv, 0000, ea50a000),			\
  X(_csneg, 0000, ea50b000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_dls,   0000, f040e001),			\
  X(_dlstp, 0000, f000e001),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_lctp,  0000, f00fe001),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_le,    0000, f00fc001),			\
  X(_letp,  0000, f01fc001),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_wls,   0000, f040c001),			\
  X(_wlstp, 0000, f000c001),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* The 16-bit opcode for each T_MNEM code, indexed relative to the
   enum offset above.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Likewise the 32-bit opcode; THUMB_SETS_FLAGS tests the S bit of the
   wide encoding.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
11445
11446/* Thumb instruction encoders, in alphabetical order. */
11447
11448/* ADDW or SUBW. */
11449
11450static void
11451do_t_add_sub_w (void)
11452{
11453 int Rd, Rn;
11454
11455 Rd = inst.operands[0].reg;
11456 Rn = inst.operands[1].reg;
11457
11458 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
11459 is the SP-{plus,minus}-immediate form of the instruction. */
11460 if (Rn == REG_SP)
11461 constraint (Rd == REG_PC, BAD_PC);
11462 else
11463 reject_bad_reg (Rd);
11464
11465 inst.instruction |= (Rn << 16) | (Rd << 8);
11466 inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
11467}
11468
11469/* Parse an add or subtract instruction. We get here with inst.instruction
11470 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
11471
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* Flag-setting variants prefer the 16-bit encoding outside an
	 IT block; non-flag-setting ones prefer it inside.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate operand.  */
	  int add;

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* The Thumb-1 group relocations force the 16-bit
		     encoding; for anything else, either fix up now
		     (explicit .n) or defer to relaxation.  */
		  if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || (inst.relocs[0].type
			  > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC))
		  {
		    if (inst.size_req == 2)
		      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
		    else
		      inst.relax = opcode;
		  }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* Wide (32-bit) encoding.  */
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return idiom SUBS PC, LR, #imm8
		     may write PC here.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.relocs[0].exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.relocs[0].exp.X_add_number < 0
			      || inst.relocs[0].exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.relocs[0].exp.X_add_number;
		  inst.relocs[0].type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register operand, possibly shifted.  */
	  unsigned int value = inst.relocs[0].exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* ADD <Rdn>, <Rm>: normalise so the non-destination
			 operand goes in the Rm field.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (divided) syntax: 16-bit encodings only.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
11692
/* ADR (argument parse): load a PC-relative address into Rd.  */
static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.relocs[0].pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
      inst.relocs[0].exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.relocs[0].pc_rel = 1;
      inst.instruction |= Rd << 4;
    }

  /* For a defined Thumb function symbol, keep the Thumb bit set in the
     computed address.  */
  if (inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number += 1;
}
11732
11733/* Arithmetic instructions for which there is just one 16-bit
11734 instruction encoding, and it allows only two low registers.
11735 For maximal compatibility with ARM syntax, we allow three register
11736 operands even when Thumb-32 instructions are not available, as long
11737 as the first two are identical. For instance, both "sbc r0,r1" and
11738 "sbc r0,r0,r1" are allowed. */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  /* The 16-bit form needs low registers, no shift, and no
	     explicit .w qualifier.  */
	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* Non-commutative: the 16-bit form requires Rd == Rs.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
11821
11822/* Similarly, but for instructions where the arithmetic operation is
11823 commutative, so we can allow either of them to be different from
11824 the destination operand in a 16-bit instruction. For instance, all
11825 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11826 accepted. */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* Commutative operation: the 16-bit form works if the
	     destination matches either source.  */
	  if (narrow)
	    {
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
11922
11923static void
11924do_t_bfc (void)
11925{
11926 unsigned Rd;
11927 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
11928 constraint (msb > 32, _("bit-field extends past end of register"));
11929 /* The instruction encoding stores the LSB and MSB,
11930 not the LSB and width. */
11931 Rd = inst.operands[0].reg;
11932 reject_bad_reg (Rd);
11933 inst.instruction |= Rd << 8;
11934 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
11935 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
11936 inst.instruction |= msb - 1;
11937}
11938
11939static void
11940do_t_bfi (void)
11941{
11942 int Rd, Rn;
11943 unsigned int msb;
11944
11945 Rd = inst.operands[0].reg;
11946 reject_bad_reg (Rd);
11947
11948 /* #0 in second position is alternative syntax for bfc, which is
11949 the same instruction but with REG_PC in the Rm field. */
11950 if (!inst.operands[1].isreg)
11951 Rn = REG_PC;
11952 else
11953 {
11954 Rn = inst.operands[1].reg;
11955 reject_bad_reg (Rn);
11956 }
11957
11958 msb = inst.operands[2].imm + inst.operands[3].imm;
11959 constraint (msb > 32, _("bit-field extends past end of register"));
11960 /* The instruction encoding stores the LSB and MSB,
11961 not the LSB and width. */
11962 inst.instruction |= Rd << 8;
11963 inst.instruction |= Rn << 16;
11964 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11965 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11966 inst.instruction |= msb - 1;
11967}
11968
11969static void
11970do_t_bfx (void)
11971{
11972 unsigned Rd, Rn;
11973
11974 Rd = inst.operands[0].reg;
11975 Rn = inst.operands[1].reg;
11976
11977 reject_bad_reg (Rd);
11978 reject_bad_reg (Rn);
11979
11980 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
11981 _("bit-field extends past end of register"));
11982 inst.instruction |= Rd << 8;
11983 inst.instruction |= Rn << 16;
11984 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11985 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11986 inst.instruction |= inst.operands[3].imm - 1;
11987}
11988
11989/* ARM V5 Thumb BLX (argument parse)
11990 BLX <target_addr> which is BLX(1)
11991 BLX <Rm> which is BLX(2)
11992 Unfortunately, there are two different opcodes for this mnemonic.
11993 So, the insns[].value is not used, and the code here zaps values
11994 into inst.instruction.
11995
11996 ??? How to take advantage of the additional two bits of displacement
11997 available in Thumb32 mode? Need new relocation? */
11998
11999static void
12000do_t_blx (void)
12001{
12002 set_pred_insn_type_last ();
12003
12004 if (inst.operands[0].isreg)
12005 {
12006 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
12007 /* We have a register, so this is BLX(2). */
12008 inst.instruction |= inst.operands[0].reg << 3;
12009 }
12010 else
12011 {
12012 /* No register. This must be BLX(1). */
12013 inst.instruction = 0xf000e800;
12014 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
12015 }
12016}
12017
/* Thumb B/Bcc (argument parse): choose between the 16- and 32-bit
   branch encodings and the matching PC-relative relocation.  */
static void
do_t_branch (void)
{
  int opcode;
  int cond;
  bfd_reloc_code_real_type reloc;

  cond = inst.cond;
  set_pred_insn_type (IF_INSIDE_IT_LAST_INSN);

  if (in_pred_block ())
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
	 branches.  */
      cond = COND_ALWAYS;
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  /* Use the wide encoding when forced with .w, or when the target is a
     constant/complex expression that cannot rely on relaxation.  */
  if (unified_syntax
      && (inst.size_req == 4
	  || (inst.size_req != 2
	      && (inst.operands[0].hasreloc
		  || inst.relocs[0].exp.X_op == O_constant))))
    {
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
		      _("selected architecture does not support "
			"wide conditional branch instruction"));

	  gas_assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }
  inst.relocs[0].type = reloc;
  inst.relocs[0].pc_rel = 1;
}
12079
12080/* Actually do the work for Thumb state bkpt and hlt. The only difference
12081 between the two is the maximum immediate allowed - which is passed in
12082 RANGE. */
12083static void
12084do_t_bkpt_hlt1 (int range)
12085{
12086 constraint (inst.cond != COND_ALWAYS,
12087 _("instruction is always unconditional"));
12088 if (inst.operands[0].present)
12089 {
12090 constraint (inst.operands[0].imm > range,
12091 _("immediate value out of range"));
12092 inst.instruction |= inst.operands[0].imm;
12093 }
12094
12095 set_pred_insn_type (NEUTRAL_IT_INSN);
12096}
12097
/* Thumb HLT: immediate limited to 0-63.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
12103
/* Thumb BKPT: immediate limited to 0-255.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
12109
/* Thumb BL/BLX with a 23-bit PC-relative displacement.  */
static void
do_t_branch23 (void)
{
  set_pred_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.relocs[0].type == BFD_RELOC_ARM_PLT32)
    inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_symbol
      = find_real_start (inst.relocs[0].exp.X_add_symbol);
#endif
}
12137
/* Thumb BX <Rm>: Rm goes in bits 3-6.  */
static void
do_t_bx (void)
{
  set_pred_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
12147
12148static void
12149do_t_bxj (void)
12150{
12151 int Rm;
12152
12153 set_pred_insn_type_last ();
12154 Rm = inst.operands[0].reg;
12155 reject_bad_reg (Rm);
12156 inst.instruction |= Rm << 16;
12157}
12158
12159static void
12160do_t_clz (void)
12161{
12162 unsigned Rd;
12163 unsigned Rm;
12164
12165 Rd = inst.operands[0].reg;
12166 Rm = inst.operands[1].reg;
12167
12168 reject_bad_reg (Rd);
12169 reject_bad_reg (Rm);
12170
12171 inst.instruction |= Rd << 8;
12172 inst.instruction |= Rm << 16;
12173 inst.instruction |= Rm;
12174}
12175
12176/* For the Armv8.1-M conditional instructions. */
static void
do_t_cond (void)
{
  unsigned Rd, Rn, Rm;
  signed int cond;

  constraint (inst.cond != COND_ALWAYS, BAD_COND);

  Rd = inst.operands[0].reg;
  switch (inst.instruction)
    {
      /* The full four-operand forms take the condition as written.  */
      case T_MNEM_csinc:
      case T_MNEM_csinv:
      case T_MNEM_csneg:
      case T_MNEM_csel:
	Rn = inst.operands[1].reg;
	Rm = inst.operands[2].reg;
	cond = inst.operands[3].imm;
	constraint (Rn == REG_SP, BAD_SP);
	constraint (Rm == REG_SP, BAD_SP);
	break;

      /* CINC/CINV/CNEG Rd, Rn, cond are aliases of CSINC/CSINV/CSNEG
	 Rd, Rn, Rn with the condition inverted.  */
      case T_MNEM_cinc:
      case T_MNEM_cinv:
      case T_MNEM_cneg:
	Rn = inst.operands[1].reg;
	cond = inst.operands[2].imm;
	/* Invert the last bit to invert the cond.  */
	cond = TOGGLE_BIT (cond, 0);
	constraint (Rn == REG_SP, BAD_SP);
	Rm = Rn;
	break;

      /* CSET/CSETM Rd, cond are aliases with both sources fixed to PC
	 (i.e. the zero register in this encoding) and the condition
	 inverted.  */
      case T_MNEM_csetm:
      case T_MNEM_cset:
	cond = inst.operands[1].imm;
	/* Invert the last bit to invert the cond.  */
	cond = TOGGLE_BIT (cond, 0);
	Rn = REG_PC;
	Rm = REG_PC;
	break;

      default: abort ();
    }

  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction = THUMB_OP32 (inst.instruction);
  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  inst.instruction |= cond << 4;
}
12229
/* Thumb CSDB: no operands; the opcode needs no further encoding.  */
static void
do_t_csdb (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
}
12235
/* Thumb CPS: encode the mode immediate directly.  */
static void
do_t_cps (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction |= inst.operands[0].imm;
}
12242
/* Thumb CPSIE/CPSID (argument parse).  */
static void
do_t_cpsi (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  /* The two-argument form (with a mode) or an explicit .w needs the
     32-bit encoding, available from v6 (non-M) on.  */
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Carry the IE/ID disable bits over from the 16-bit opcode.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
12270
12271/* THUMB CPY instruction (argument parse). */
12272
12273static void
12274do_t_cpy (void)
12275{
12276 if (inst.size_req == 4)
12277 {
12278 inst.instruction = THUMB_OP32 (T_MNEM_mov);
12279 inst.instruction |= inst.operands[0].reg << 8;
12280 inst.instruction |= inst.operands[1].reg;
12281 }
12282 else
12283 {
12284 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
12285 inst.instruction |= (inst.operands[0].reg & 0x7);
12286 inst.instruction |= inst.operands[1].reg << 3;
12287 }
12288}
12289
/* CBZ/CBNZ (compare and branch on (non-)zero).  Not permitted inside
   an IT block; only a low register may be tested.  */
static void
do_t_cbz (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
  inst.instruction |= inst.operands[0].reg;
  /* The 7-bit pc-relative branch offset is filled in by the fixup.  */
  inst.relocs[0].pc_rel = 1;
  inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH7;
}
12299
/* DBG hint: the 4-bit option immediate goes in the low nibble.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
12305
12306static void
12307do_t_div (void)
12308{
12309 unsigned Rd, Rn, Rm;
12310
12311 Rd = inst.operands[0].reg;
12312 Rn = (inst.operands[1].present
12313 ? inst.operands[1].reg : Rd);
12314 Rm = inst.operands[2].reg;
12315
12316 reject_bad_reg (Rd);
12317 reject_bad_reg (Rn);
12318 reject_bad_reg (Rm);
12319
12320 inst.instruction |= Rd << 8;
12321 inst.instruction |= Rn << 16;
12322 inst.instruction |= Rm;
12323}
12324
12325static void
12326do_t_hint (void)
12327{
12328 if (unified_syntax && inst.size_req == 4)
12329 inst.instruction = THUMB_OP32 (inst.instruction);
12330 else
12331 inst.instruction = THUMB_OP16 (inst.instruction);
12332}
12333
/* IT (if-then) instruction.  Records the predication state for the
   following block and fixes up the mask when the leading condition is
   one of the "negative" (odd-numbered-complement) codes, because the
   hardware mask bits are defined relative to cond<0>.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_pred_insn_type (IT_INSN);
  now_pred.mask = (inst.instruction & 0xf) | 0x10;
  now_pred.cc = cond;
  now_pred.warn_deprecated = FALSE;
  now_pred.type = SCALAR_PRED;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The position of the lowest set bit tells how many T/E slots
	 follow; every bit above it must be complemented.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_pred.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_pred.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_pred.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_pred.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
12377
12378/* Helper function used for both push/pop and ldm/stm. */
12379static void
12380encode_thumb2_multi (bfd_boolean do_io, int base, unsigned mask,
12381 bfd_boolean writeback)
12382{
12383 bfd_boolean load, store;
12384
12385 gas_assert (base != -1 || !do_io);
12386 load = do_io && ((inst.instruction & (1 << 20)) != 0);
12387 store = do_io && !load;
12388
12389 if (mask & (1 << 13))
12390 inst.error = _("SP not allowed in register list");
12391
12392 if (do_io && (mask & (1 << base)) != 0
12393 && writeback)
12394 inst.error = _("having the base register in the register list when "
12395 "using write back is UNPREDICTABLE");
12396
12397 if (load)
12398 {
12399 if (mask & (1 << 15))
12400 {
12401 if (mask & (1 << 14))
12402 inst.error = _("LR and PC should not both be in register list");
12403 else
12404 set_pred_insn_type_last ();
12405 }
12406 }
12407 else if (store)
12408 {
12409 if (mask & (1 << 15))
12410 inst.error = _("PC not allowed in register list");
12411 }
12412
12413 if (do_io && ((mask & (mask - 1)) == 0))
12414 {
12415 /* Single register transfers implemented as str/ldr. */
12416 if (writeback)
12417 {
12418 if (inst.instruction & (1 << 23))
12419 inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
12420 else
12421 inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
12422 }
12423 else
12424 {
12425 if (inst.instruction & (1 << 23))
12426 inst.instruction = 0x00800000; /* ia -> [base] */
12427 else
12428 inst.instruction = 0x00000c04; /* db -> [base, #-4] */
12429 }
12430
12431 inst.instruction |= 0xf8400000;
12432 if (load)
12433 inst.instruction |= 0x00100000;
12434
12435 mask = ffs (mask) - 1;
12436 mask <<= 12;
12437 }
12438 else if (writeback)
12439 inst.instruction |= WRITE_BACK;
12440
12441 inst.instruction |= mask;
12442 if (do_io)
12443 inst.instruction |= base << 16;
12444}
12445
/* LDM/STM (load/store multiple).  In unified syntax, tries hard to
   find a 16-bit encoding (plain ldmia/stmia, a single-register ldr/str,
   or push/pop via SP) before falling back to the 32-bit form.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  /* Bit for the base register within the register list.  */
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* 16-bit stmia always writes back; 16-bit ldmia writes
		 back exactly when the base is absent from the list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
							     : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0].reg == REG_SP)
	    {
	      if (inst.operands[0].writeback)
		{
		  /* SP with writeback is push/pop.  */
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* Single register: SP-relative str/ldr.  */
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_multi (TRUE /* do_io */, inst.operands[0].reg,
			       inst.operands[1].imm,
			       inst.operands[0].writeback);
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
12574
/* LDREX: only a plain [Rn{, #imm}] pre-indexed address is legal, and
   the base may not be the PC.  The offset is resolved by the U8 fixup.  */
static void
do_t_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative,
	      BAD_ADDR_MODE);

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
12590
/* LDREXD: if the second destination is omitted it defaults to Rt+1
   (which is why LR is rejected as first register: LR+1 would be PC).  */
static void
do_t_ldrexd (void)
{
  if (!inst.operands[1].present)
    {
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed as first register "
		    "when second register is omitted"));
      inst.operands[1].reg = inst.operands[0].reg + 1;
    }
  constraint (inst.operands[0].reg == inst.operands[1].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
}
12608
/* Thumb load/store of a single data item (LDR/STR and the byte,
   halfword and signed variants).  Selects among the several 16-bit
   encodings and the 32-bit Thumb-2 encoding, or defers to the literal
   pool / mov conversion for "ldr rX, =expr" pseudo-operands.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load that targets the PC is a branch and must close an IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_pred_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* "ldr rX, =expr": may become a mov or a literal-pool load.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- and SP-relative loads/stores have their own
		     16-bit opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.relocs[0].pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* Without an explicit .n, let relaxation widen the
		   instruction if the offset will not fit.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Divided (non-unified) syntax from here on: 16-bit encodings only.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert an immediate-offset template to its register-offset twin.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
12795
/* LDRD/STRD (load/store doubleword).  An omitted second transfer
   register defaults to Rt+1; LR and r12 are rejected as first register
   because Rt+1 would then be PC or SP.  */
static void
do_t_ldstd (void)
{
  if (!inst.operands[1].present)
    {
      inst.operands[1].reg = inst.operands[0].reg + 1;
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed here"));
      constraint (inst.operands[0].reg == REG_R12,
		  _("r12 not allowed here"));
    }

  if (inst.operands[2].writeback
      && (inst.operands[0].reg == inst.operands[2].reg
	  || inst.operands[1].reg == inst.operands[2].reg))
    as_warn (_("base register written back, and overlaps "
	       "one of transfer registers"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
}
12818
/* LDRT/STRT family (unprivileged load/store): encode Rt and delegate
   the address operand to the T-variant address encoder.  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
12825
12826static void
12827do_t_mla (void)
12828{
12829 unsigned Rd, Rn, Rm, Ra;
12830
12831 Rd = inst.operands[0].reg;
12832 Rn = inst.operands[1].reg;
12833 Rm = inst.operands[2].reg;
12834 Ra = inst.operands[3].reg;
12835
12836 reject_bad_reg (Rd);
12837 reject_bad_reg (Rn);
12838 reject_bad_reg (Rm);
12839 reject_bad_reg (Ra);
12840
12841 inst.instruction |= Rd << 8;
12842 inst.instruction |= Rn << 16;
12843 inst.instruction |= Rm;
12844 inst.instruction |= Ra << 12;
12845}
12846
12847static void
12848do_t_mlal (void)
12849{
12850 unsigned RdLo, RdHi, Rn, Rm;
12851
12852 RdLo = inst.operands[0].reg;
12853 RdHi = inst.operands[1].reg;
12854 Rn = inst.operands[2].reg;
12855 Rm = inst.operands[3].reg;
12856
12857 reject_bad_reg (RdLo);
12858 reject_bad_reg (RdHi);
12859 reject_bad_reg (Rn);
12860 reject_bad_reg (Rm);
12861
12862 inst.instruction |= RdLo << 12;
12863 inst.instruction |= RdHi << 8;
12864 inst.instruction |= Rn << 16;
12865 inst.instruction |= Rm;
12866}
12867
/* MOV/MOVS/CMP with a register or immediate second operand.  Handles
   narrow (16-bit) vs wide (32-bit) selection, register-shifted MOVs
   (emitted as shift instructions), the MOVS PC,LR special case, and
   the divided-syntax fallbacks.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* A MOV into the PC is a branch and must end an IT block.  */
  if (Rn == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      /* Destination field position differs between mov (bit 8) and
	 cmp-style encodings (bit 16).  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      /* Inside an IT block the 16-bit MOV does not set flags, so movs
	 must widen there; outside, movs can stay narrow only with low
	 registers.  */
      if (in_pred_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				 "deprecated when r%u is the destination "
				 "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		    constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_pred_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		  || inst.relocs[0].type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		{
		  if (inst.size_req == 2)
		    inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
		  else
		    inst.relax = opcode;
		}
	    }
	  else
	    {
	      /* The Thumb-1 group relocations only fit the 16-bit
		 encoding, so reject them here.  */
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_pred_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_pred_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
13167
/* MOVW/MOVT (16-bit immediate move).  Bit 23 of the template
   distinguishes MOVT ("top") from MOVW; :lower16:/:upper16: operators
   must match the chosen variant.  */
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  top = (inst.instruction & 0x00800000) != 0;
  if (inst.relocs[0].type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.relocs[0].type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      /* Constant known now: scatter imm16 into the i:imm4:imm3:imm8
	 fields of the T32 encoding.  */
      imm = inst.relocs[0].exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
13200
/* MVN/MVNS/TST/CMN/TEQ style two-register (or register/immediate)
   operations.  Chooses narrow vs wide encoding in unified syntax.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* cmp/cmn merely forbid PC; the others also forbid SP.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* Destination field position: bit 8 for mvn, bit 16 otherwise.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
13280
/* MRS (move from status register to general register).  Handles both
   banked-register operands and PSR-flag operands, after giving VFP a
   chance to claim the mnemonic.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register: validate the encoding bits parsed into reg.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
						   "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
13328
/* MSR (move general register to status register).  Validates the
   requested PSR fields against the selected CPU's capabilities, then
   encodes the field mask and source register.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* A banked-register destination is parsed as a register; a PSR-field
     destination is parsed as an immediate.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
13375
/* Encode Thumb MUL/MULS.  Chooses between the 16-bit two-register form
   (which requires the destination to overlap a source) and the 32-bit
   three-register form, depending on register numbers, flag-setting, the
   IT-block context and any explicit .n/.w size request.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* Two-operand form "mul Rd, Rm" means "mul Rd, Rd, Rm".  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      /* 16-bit form needs low registers and Rd overlapping a source;
	 flag-setting must also agree with the IT context (MULS outside
	 IT, conditional MUL inside).  */
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
    }
  else
    {
      /* Pre-unified syntax only has the 16-bit low-register form.  */
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      /* Put whichever source does not overlap Rd in the Rm slot.  */
      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
13438
13439static void
13440do_t_mull (void)
13441{
13442 unsigned RdLo, RdHi, Rn, Rm;
13443
13444 RdLo = inst.operands[0].reg;
13445 RdHi = inst.operands[1].reg;
13446 Rn = inst.operands[2].reg;
13447 Rm = inst.operands[3].reg;
13448
13449 reject_bad_reg (RdLo);
13450 reject_bad_reg (RdHi);
13451 reject_bad_reg (Rn);
13452 reject_bad_reg (Rm);
13453
13454 inst.instruction |= RdLo << 12;
13455 inst.instruction |= RdHi << 8;
13456 inst.instruction |= Rn << 16;
13457 inst.instruction |= Rm;
13458
13459 if (RdLo == RdHi)
13460 as_tsktsk (_("rdhi and rdlo must be different"));
13461}
13462
/* Encode Thumb NOP and its hint variants (YIELD/WFE/WFI/SEV via the hint
   immediate).  Falls back to "mov r8, r8" (0x46c0) when Thumb-2 is not
   available.  */
static void
do_t_nop (void)
{
  set_pred_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      /* Hints above 15 or an explicit .w need the 32-bit encoding.  */
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    inst.instruction = 0x46c0;	/* mov r8, r8 acts as a NOP.  */
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
13495
13496static void
13497do_t_neg (void)
13498{
13499 if (unified_syntax)
13500 {
13501 bfd_boolean narrow;
13502
13503 if (THUMB_SETS_FLAGS (inst.instruction))
13504 narrow = !in_pred_block ();
13505 else
13506 narrow = in_pred_block ();
13507 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
13508 narrow = FALSE;
13509 if (inst.size_req == 4)
13510 narrow = FALSE;
13511
13512 if (!narrow)
13513 {
13514 inst.instruction = THUMB_OP32 (inst.instruction);
13515 inst.instruction |= inst.operands[0].reg << 8;
13516 inst.instruction |= inst.operands[1].reg << 16;
13517 }
13518 else
13519 {
13520 inst.instruction = THUMB_OP16 (inst.instruction);
13521 inst.instruction |= inst.operands[0].reg;
13522 inst.instruction |= inst.operands[1].reg << 3;
13523 }
13524 }
13525 else
13526 {
13527 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
13528 BAD_HIREG);
13529 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
13530
13531 inst.instruction = THUMB_OP16 (inst.instruction);
13532 inst.instruction |= inst.operands[0].reg;
13533 inst.instruction |= inst.operands[1].reg << 3;
13534 }
13535}
13536
/* Encode Thumb-2 ORN (bitwise OR NOT), register or immediate form.  */
static void
do_t_orn (void)
{
  unsigned Rd, Rn;

  /* Two-operand form "orn Rd, op2" means "orn Rd, Rd, op2".  */
  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      /* Switch the template from the register form to the modified
	 immediate form and let the fixup encode the constant.  */
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      /* Register-shifted-register is not available in Thumb-2.  */
      constraint (inst.operands[2].shifted
		  && inst.operands[2].immisreg,
		  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
13570
/* Encode Thumb-2 PKHBT (pack halfword bottom-top), with an optional
   LSL shift on Rm encoded in the imm3:imm2 fields.  */
static void
do_t_pkhbt (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (inst.operands[3].present)
    {
      unsigned int val = inst.relocs[0].exp.X_add_number;
      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));
      /* Split the 5-bit shift amount into imm3 (bits 14:12) and
	 imm2 (bits 7:6).  */
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}
13596
/* Encode Thumb-2 PKHTB (pack halfword top-bottom).  Implemented via
   do_t_pkhbt: a shiftless PKHTB is the same operation as a shiftless
   PKHBT with the source registers exchanged.  */
static void
do_t_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      unsigned Rtmp;

      /* Clear the tb bit so the result encodes as PKHBT.  */
      inst.instruction &= ~0x00000020;

      /* PR 10168.  Swap the Rm and Rn registers.  */
      Rtmp = inst.operands[1].reg;
      inst.operands[1].reg = inst.operands[2].reg;
      inst.operands[2].reg = Rtmp;
    }
  do_t_pkhbt ();
}
13613
13614static void
13615do_t_pld (void)
13616{
13617 if (inst.operands[0].immisreg)
13618 reject_bad_reg (inst.operands[0].imm);
13619
13620 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
13621}
13622
/* Encode Thumb PUSH/POP.  Selects among the plain 16-bit form (low
   registers only), the 16-bit form with LR (push) or PC (pop), and the
   32-bit LDM/STM-based form.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  /* Only low registers: plain 16-bit encoding.  */
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  /* Low registers plus exactly LR (push) or PC (pop): 16-bit encoding
     with the extra M/P bit.  */
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else needs the 32-bit multiple-register encoding.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_multi (TRUE /* do_io */, 13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
13655
13656static void
13657do_t_clrm (void)
13658{
13659 if (unified_syntax)
13660 encode_thumb2_multi (FALSE /* do_io */, -1, inst.operands[0].imm, FALSE);
13661 else
13662 {
13663 inst.error = _("invalid register list to push/pop instruction");
13664 return;
13665 }
13666}
13667
/* Encode VSCCLRM (floating-point secure context clear multiple).
   The register list is either single-precision (Sn) or double-precision
   (Dn); the start register is split across the D bit and the Vd field.  */
static void
do_t_vscclrm (void)
{
  if (inst.operands[0].issingle)
    {
      /* Single precision: lowest bit of the S register goes to bit 22,
	 the remaining bits to the Vd field; imm holds the list length.  */
      inst.instruction |= (inst.operands[0].reg & 0x1) << 22;
      inst.instruction |= (inst.operands[0].reg & 0x1e) << 11;
      inst.instruction |= inst.operands[0].imm;
    }
  else
    {
      /* Double precision: top bit of the D register goes to bit 22,
	 low four bits to Vd; the count is doubled (bit 8 set, imm << 1)
	 since each D register is two words.  */
      inst.instruction |= (inst.operands[0].reg & 0x10) << 18;
      inst.instruction |= (inst.operands[0].reg & 0xf) << 12;
      inst.instruction |= 1 << 8;
      inst.instruction |= inst.operands[0].imm << 1;
    }
}
13685
13686static void
13687do_t_rbit (void)
13688{
13689 unsigned Rd, Rm;
13690
13691 Rd = inst.operands[0].reg;
13692 Rm = inst.operands[1].reg;
13693
13694 reject_bad_reg (Rd);
13695 reject_bad_reg (Rm);
13696
13697 inst.instruction |= Rd << 8;
13698 inst.instruction |= Rm << 16;
13699 inst.instruction |= Rm;
13700}
13701
/* Encode Thumb REV/REV16/REVSH: 16-bit form for low registers, otherwise
   the 32-bit form (in which Rm is duplicated into two fields).  */
static void
do_t_rev (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      /* The 32-bit encoding carries Rm in both the Rn and Rm fields.  */
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
13730
13731static void
13732do_t_rrx (void)
13733{
13734 unsigned Rd, Rm;
13735
13736 Rd = inst.operands[0].reg;
13737 Rm = inst.operands[1].reg;
13738
13739 reject_bad_reg (Rd);
13740 reject_bad_reg (Rm);
13741
13742 inst.instruction |= Rd << 8;
13743 inst.instruction |= Rm;
13744}
13745
/* Encode Thumb RSB (reverse subtract), register or immediate form.
   "rsbs Rd, Rs, #0" can be narrowed to the 16-bit NEG encoding.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit; flag-setting must agree
	 with the IT context for a 16-bit encoding.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* Only an immediate of exactly 0 has a 16-bit equivalent (NEG).  */
      if (inst.relocs[0].exp.X_op != O_constant
	  || inst.relocs[0].exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.relocs[0].type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* 32-bit modified-immediate form; the constant is resolved by
	     the T32_IMMEDIATE fixup.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
13800
13801static void
13802do_t_setend (void)
13803{
13804 if (warn_on_deprecated
13805 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
13806 as_tsktsk (_("setend use is deprecated for ARMv8"));
13807
13808 set_pred_insn_type (OUTSIDE_PRED_INSN);
13809 if (inst.operands[0].imm)
13810 inst.instruction |= 0x8;
13811}
13812
/* Encode Thumb shift instructions (ASR/LSL/LSR/ROR, with or without
   flag-setting), choosing among:
     - 16-bit register-shift form (Rd == Rn, all low registers),
     - 16-bit immediate-shift form,
     - 32-bit three-register form,
     - 32-bit MOV(S)-with-shifted-operand form for immediate shifts.
   The choice depends on unified vs legacy syntax, register numbers,
   the IT-block context and any explicit size request.  */
static void
do_t_shift (void)
{
  /* Two-operand form "lsl Rd, op" means "lsl Rd, Rd, op".  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* 16-bit encodings set flags outside an IT block and are
	 conditional inside one; mismatches force the 32-bit form.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit ROR-by-immediate.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* 16-bit register shift requires Rd == Rn and a low shift reg.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 32-bit three-register shift.  */
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Immediate shift: encode as MOV(S) Rd, Rn, <shift> #imm.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.relocs[0].type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 16-bit register shift.  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* 16-bit immediate shift; the amount is filled in by the
		 THUMB_SHIFT fixup.  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Legacy syntax: only 16-bit low-register encodings exist.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
13960
13961static void
13962do_t_simd (void)
13963{
13964 unsigned Rd, Rn, Rm;
13965
13966 Rd = inst.operands[0].reg;
13967 Rn = inst.operands[1].reg;
13968 Rm = inst.operands[2].reg;
13969
13970 reject_bad_reg (Rd);
13971 reject_bad_reg (Rn);
13972 reject_bad_reg (Rm);
13973
13974 inst.instruction |= Rd << 8;
13975 inst.instruction |= Rn << 16;
13976 inst.instruction |= Rm;
13977}
13978
/* Three-register Thumb-2 SIMD encoder with reversed source operands:
   assembly operand 1 fills the Rm field and operand 2 the Rn field
   (contrast with do_t_simd).  */
static void
do_t_simd2 (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;	/* Note: operand 1 -> Rm field.  */
  Rn = inst.operands[2].reg;	/* Note: operand 2 -> Rn field.  */

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
}
13996
/* Encode Thumb SMC (secure monitor call) with a 4-bit immediate.  */
static void
do_t_smc (void)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  constraint (value > 0xf, _("immediate too large (bigger than 0xF)"));

  /* The immediate is encoded directly; no fixup is needed.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0x000f) << 16;

  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_pred_insn_type_last ();
}
14013
/* Encode Thumb HVC (hypervisor call) with a 16-bit immediate split
   across the imm4 (bits 19:16) and imm12 (bits 11:0) fields.
   NOTE(review): unlike do_t_smc, this does not constrain the expression
   to O_constant or range-check the value — confirm whether the parser
   guarantees that upstream.  */
static void
do_t_hvc (void)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;

  inst.relocs[0].type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0x0fff);
  inst.instruction |= (value & 0xf000) << 4;
}
14023
/* Common encoder for Thumb-2 SSAT/USAT.  BIAS is subtracted from the
   saturation position before encoding: 1 for SSAT (positions 1..32
   encode as 0..31), 0 for USAT.  Handles the optional LSL/ASR shift.  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.relocs[0].exp.X_add_number;

      /* The shift is encoded inline; no fixup needed.  */
      inst.relocs[0].type = BFD_RELOC_UNUSED;

      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  /* Bit 21 ("sh") selects ASR; clear means LSL.  */
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Split the 5-bit amount into imm3 (bits 14:12) and
	     imm2 (bits 7:6).  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
14061
/* Encode Thumb-2 SSAT: saturation position 1..32 is encoded biased by 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
14067
14068static void
14069do_t_ssat16 (void)
14070{
14071 unsigned Rd, Rn;
14072
14073 Rd = inst.operands[0].reg;
14074 Rn = inst.operands[2].reg;
14075
14076 reject_bad_reg (Rd);
14077 reject_bad_reg (Rn);
14078
14079 inst.instruction |= Rd << 8;
14080 inst.instruction |= inst.operands[1].imm - 1;
14081 inst.instruction |= Rn << 16;
14082}
14083
/* Encode Thumb-2 STREX Rd, Rt, [Rn {, #imm}].  Only a plain pre-indexed
   immediate address is allowed; the offset is applied via a U8 fixup.  */
static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
14100
/* Encode Thumb-2 STREXD Rd, Rt, Rt2, [Rn].  If Rt2 is omitted it
   defaults to Rt+1.  The status register must not overlap either data
   register or the base.  */
static void
do_t_strexd (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
14117
14118static void
14119do_t_sxtah (void)
14120{
14121 unsigned Rd, Rn, Rm;
14122
14123 Rd = inst.operands[0].reg;
14124 Rn = inst.operands[1].reg;
14125 Rm = inst.operands[2].reg;
14126
14127 reject_bad_reg (Rd);
14128 reject_bad_reg (Rn);
14129 reject_bad_reg (Rm);
14130
14131 inst.instruction |= Rd << 8;
14132 inst.instruction |= Rn << 16;
14133 inst.instruction |= Rm;
14134 inst.instruction |= inst.operands[3].imm << 4;
14135}
14136
/* Encode Thumb SXTH/SXTB/UXTH/UXTB: 16-bit form when both registers are
   low and there is no rotation, otherwise the 32-bit form with the
   rotation selector at bit 4.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  /* inst.instruction <= 0xffff means the template is still the 16-bit
     opcode (not yet widened).  */
  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
14172
/* Encode Thumb SWI/SVC: the comment immediate is filled in by the
   BFD_RELOC_ARM_SWI fixup.  */
static void
do_t_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
}
14178
/* Encode Thumb-2 TBB/TBH (table branch byte/halfword).  Bit 4 of the
   template distinguishes the halfword form, which requires an
   "[Rn, Rm, lsl #1]" index.  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  /* Table branches must be the last instruction in an IT block.  */
  set_pred_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* ARMv8 relaxes the SP-as-base restriction.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
14201
/* Encode Thumb UDF (permanently undefined).  A missing immediate
   defaults to 0; immediates above 255 (or an explicit .w) force the
   32-bit encoding with the value split into imm4:imm12.  */
static void
do_t_udf (void)
{
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      constraint (inst.size_req == 2,
		  _("immediate value out of range"));
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  /* UDF is valid anywhere relative to an IT block.  */
  set_pred_insn_type (NEUTRAL_IT_INSN);
}
14224
14225
/* Encode Thumb-2 USAT: saturation position 0..31 is encoded unbiased.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
14231
14232static void
14233do_t_usat16 (void)
14234{
14235 unsigned Rd, Rn;
14236
14237 Rd = inst.operands[0].reg;
14238 Rn = inst.operands[2].reg;
14239
14240 reject_bad_reg (Rd);
14241 reject_bad_reg (Rn);
14242
14243 inst.instruction |= Rd << 8;
14244 inst.instruction |= inst.operands[1].imm;
14245 inst.instruction |= Rn << 16;
14246}
14247
14248/* Checking the range of the branch offset (VAL) with NBITS bits
14249 and IS_SIGNED signedness. Also checks the LSB to be 0. */
14250static int
14251v8_1_branch_value_check (int val, int nbits, int is_signed)
14252{
14253 gas_assert (nbits > 0 && nbits <= 32);
14254 if (is_signed)
14255 {
14256 int cmp = (1 << (nbits - 1));
14257 if ((val < -cmp) || (val >= cmp) || (val & 0x01))
14258 return FAIL;
14259 }
14260 else
14261 {
14262 if ((val <= 0) || (val >= (1 << nbits)) || (val & 0x1))
14263 return FAIL;
14264 }
14265 return SUCCESS;
14266}
14267
/* For branches in Armv8.1-M Mainline.  Encodes the BF/BFL/BFCSEL/BFX/BFLX
   family: operand 0 is the branch-future point, operand 1 the branch
   target (immediate label or, for BFX/BFLX, a register).  Resolved
   immediates are encoded inline; unresolved ones become pc-relative
   relocations.  */
static void
do_t_branch_future (void)
{
  /* Remember the mnemonic before widening overwrites it.  */
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);
  /* Operand 0: the branch-future point, a 5-bit forward offset.  */
  if (inst.operands[0].hasreloc == 0)
    {
      if (v8_1_branch_value_check (inst.operands[0].imm, 5, FALSE) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      /* Halved (LSB known zero) and placed at bits 26:23.  */
      inst.instruction |= ((inst.operands[0].imm & 0x1f) >> 1) << 23;
    }
  else
    {
      inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH5;
      inst.relocs[0].pc_rel = 1;
    }

  switch (insn)
    {
    case T_MNEM_bf:
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 17, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  /* Split the 17-bit offset into the immA/immB/immC fields.  */
	  int immA = (val & 0x0001f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF17;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfl:
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 19, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  /* BFL has a wider, 19-bit offset.  */
	  int immA = (val & 0x0007f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF19;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfcsel:
      /* Operand 1.  */
      if (inst.operands[1].hasreloc == 0)
	{
	  /* 13-bit offset, split as for BF.  */
	  int val = inst.operands[1].imm;
	  int immA = (val & 0x00001000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF13;
	  inst.relocs[1].pc_rel = 1;
	}

      /* Operand 2: must lie 2 or 4 bytes past the branch-future point;
	 4 bytes sets the T bit.  Both operands must be resolved the
	 same way (both immediates or both relocations).  */
      if (inst.operands[2].hasreloc == 0)
	{
	  constraint ((inst.operands[0].hasreloc != 0), BAD_ARGS);
	  int val2 = inst.operands[2].imm;
	  int val0 = inst.operands[0].imm & 0x1f;
	  int diff = val2 - val0;
	  if (diff == 4)
	    inst.instruction |= 1 << 17;  /* T bit.  */
	  else if (diff != 2)
	    as_bad (_("out of range label-relative fixup value"));
	}
      else
	{
	  constraint ((inst.operands[0].hasreloc == 0), BAD_ARGS);
	  inst.relocs[2].type = BFD_RELOC_THUMB_PCREL_BFCSEL;
	  inst.relocs[2].pc_rel = 1;
	}

      /* Operand 3: the condition, encoded at bits 21:18.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= (inst.operands[3].imm & 0xf) << 18;
      break;

    case T_MNEM_bfx:
    case T_MNEM_bflx:
      /* Register-target forms: Rn at bits 19:16.  */
      inst.instruction |= inst.operands[1].reg << 16;
      break;

    default: abort ();
    }
}
14376
/* Helper function for do_t_loloop to handle relocations.  Encodes the
   12-bit loop-end offset of LE/WLS-style instructions inline when it is
   a known constant (negated for LE, which branches backwards), otherwise
   emits a LOOP12 pc-relative relocation.  */
static void
v8_1_loop_reloc (int is_le)
{
  if (inst.relocs[0].exp.X_op == O_constant)
    {
      int value = inst.relocs[0].exp.X_add_number;
      /* LE branches backwards, so the encoded offset is the negation.  */
      value = (is_le) ? -value : value;

      if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      int imml, immh;

      /* Split the (halfword-aligned) offset into its two fields.  */
      immh = (value & 0x00000ffc) >> 2;
      imml = (value & 0x00000002) >> 1;

      inst.instruction |= (imml << 11) | (immh << 1);
    }
  else
    {
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_LOOP12;
      inst.relocs[0].pc_rel = 1;
    }
}
14402
/* For shifts with four operands in MVE (e.g. the saturating variants
   taking RdaLo, RdaHi, #imm|Rm, Rs).  Bit 7 flags saturation, which is
   suppressed for the special "shift by 64" immediate.  */
static void
do_mve_scalar_shift1 (void)
{
  unsigned int value = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 8;

  /* Setting the bit for saturation.  */
  inst.instruction |= ((value == 64) ? 0: 1) << 7;

  /* Assuming Rm is already checked not to be 11x1.  */
  constraint (inst.operands[3].reg == inst.operands[0].reg, BAD_OVERLAP);
  constraint (inst.operands[3].reg == inst.operands[1].reg, BAD_OVERLAP);
  inst.instruction |= inst.operands[3].reg << 12;
}
14420
/* For shifts in MVE.  Handles both register-shift and immediate-shift
   forms; a two-operand instruction is normalised by treating it as if
   the "RdaHi" slot held the constant 0xf.  */
static void
do_mve_scalar_shift (void)
{
  if (!inst.operands[2].present)
    {
      /* Two-operand form: shift amount moves to operand 2 and the
	 second register field is forced to 0xf.  */
      inst.operands[2] = inst.operands[1];
      inst.operands[1].reg = 0xf;
    }

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 8;

  if (inst.operands[2].isreg)
    {
      /* Assuming Rm is already checked not to be 11x1.  */
      constraint (inst.operands[2].reg == inst.operands[0].reg, BAD_OVERLAP);
      constraint (inst.operands[2].reg == inst.operands[1].reg, BAD_OVERLAP);
      inst.instruction |= inst.operands[2].reg << 12;
    }
  else
    {
      /* Assuming imm is already checked as [1,32].  */
      unsigned int value = inst.operands[2].imm;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
      /* Change last 4 bits from 0xd to 0xf.  */
      inst.instruction |= 0x2;
    }
}
14451
/* MVE instruction encoder helpers.  Base opcode values for mnemonics
   that exist only in MVE (or differ from their Neon counterparts),
   referenced by the MVE encoding functions.  */
#define M_MNEM_vabav	0xee800f01
#define M_MNEM_vmladav	  0xeef00e00
#define M_MNEM_vmladava	  0xeef00e20
#define M_MNEM_vmladavx	  0xeef01e00
#define M_MNEM_vmladavax  0xeef01e20
#define M_MNEM_vmlsdav	  0xeef00e01
#define M_MNEM_vmlsdava	  0xeef00e21
#define M_MNEM_vmlsdavx	  0xeef01e01
#define M_MNEM_vmlsdavax  0xeef01e21
#define M_MNEM_vmullt	0xee011e00
#define M_MNEM_vmullb	0xee010e00
#define M_MNEM_vctp	0xf000e801
#define M_MNEM_vst20	0xfc801e00
#define M_MNEM_vst21	0xfc801e20
#define M_MNEM_vst40	0xfc801e01
#define M_MNEM_vst41	0xfc801e21
#define M_MNEM_vst42	0xfc801e41
#define M_MNEM_vst43	0xfc801e61
#define M_MNEM_vld20	0xfc901e00
#define M_MNEM_vld21	0xfc901e20
#define M_MNEM_vld40	0xfc901e01
#define M_MNEM_vld41	0xfc901e21
#define M_MNEM_vld42	0xfc901e41
#define M_MNEM_vld43	0xfc901e61
#define M_MNEM_vstrb	0xec000e00
#define M_MNEM_vstrh	0xec000e10
#define M_MNEM_vstrw	0xec000e40
#define M_MNEM_vstrd	0xec000e50
#define M_MNEM_vldrb	0xec100e00
#define M_MNEM_vldrh	0xec100e10
#define M_MNEM_vldrw	0xec100e40
#define M_MNEM_vldrd	0xec100e50
#define M_MNEM_vmovlt	0xeea01f40
#define M_MNEM_vmovlb	0xeea00f40
#define M_MNEM_vmovnt	0xfe311e81
#define M_MNEM_vmovnb	0xfe310e81
#define M_MNEM_vadc	0xee300f00
#define M_MNEM_vadci	0xee301f00
#define M_MNEM_vbrsr	0xfe011e60
#define M_MNEM_vaddlv	0xee890f00
#define M_MNEM_vaddlva	0xee890f20
#define M_MNEM_vaddv	0xeef10f00
#define M_MNEM_vaddva	0xeef10f20
#define M_MNEM_vddup	0xee011f6e
#define M_MNEM_vdwdup	0xee011f60
#define M_MNEM_vidup	0xee010f6e
#define M_MNEM_viwdup	0xee010f60
#define M_MNEM_vmaxv	0xeee20f00
#define M_MNEM_vmaxav	0xeee00f00
#define M_MNEM_vminv	0xeee20f80
#define M_MNEM_vminav	0xeee00f80
#define M_MNEM_vmlaldav	  0xee800e00
#define M_MNEM_vmlaldava  0xee800e20
#define M_MNEM_vmlaldavx  0xee801e00
#define M_MNEM_vmlaldavax 0xee801e20
#define M_MNEM_vmlsldav	  0xee800e01
#define M_MNEM_vmlsldava  0xee800e21
#define M_MNEM_vmlsldavx  0xee801e01
#define M_MNEM_vmlsldavax 0xee801e21
#define M_MNEM_vrmlaldavhx  0xee801f00
#define M_MNEM_vrmlaldavhax 0xee801f20
#define M_MNEM_vrmlsldavh   0xfe800e01
#define M_MNEM_vrmlsldavha  0xfe800e21
#define M_MNEM_vrmlsldavhx  0xfe801e01
#define M_MNEM_vrmlsldavhax 0xfe801e21
#define M_MNEM_vqmovnt	  0xee331e01
#define M_MNEM_vqmovnb	  0xee330e01
#define M_MNEM_vqmovunt	  0xee311e81
#define M_MNEM_vqmovunb	  0xee310e81
#define M_MNEM_vshrnt	    0xee801fc1
#define M_MNEM_vshrnb	    0xee800fc1
#define M_MNEM_vrshrnt	    0xfe801fc1
#define M_MNEM_vqshrnt	    0xee801f40
#define M_MNEM_vqshrnb	    0xee800f40
#define M_MNEM_vqshrunt	    0xee801fc0
#define M_MNEM_vqshrunb	    0xee800fc0
#define M_MNEM_vrshrnb	    0xfe800fc1
#define M_MNEM_vqrshrnt	    0xee801f41
#define M_MNEM_vqrshrnb	    0xee800f41
#define M_MNEM_vqrshrunt    0xfe801fc0
#define M_MNEM_vqrshrunb    0xfe800fc0

/* Bfloat16 instruction encoder helpers.  Base opcodes for the BF16
   widening fused multiply-add (top/bottom) instructions.  */
#define B_MNEM_vfmat 0xfc300850
#define B_MNEM_vfmab 0xfc300810
14538
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of NEON_ENC_TAB below: the up-to-three alternative base encodings
   an overloaded Neon mnemonic can map to.  N_INV marks a variant that does
   not exist for a given mnemonic.  */
struct neon_tab_entry
{
  unsigned integer;		/* Encoding for integer element types.  */
  unsigned float_or_poly;	/* Encoding for float/polynomial types.  */
  unsigned scalar_or_imm;	/* Encoding for scalar/immediate variants.  */
};
14552
/* Map overloaded Neon opcodes to their respective encodings.  Each X() row
   is (mnemonic, integer encoding, float-or-poly encoding, scalar-or-imm
   encoding) -- see struct neon_tab_entry above for the column meanings.  */
#define NEON_ENC_TAB \
 X(vabd, 0x0000700, 0x1200d00, N_INV), \
 X(vabdl, 0x0800700, N_INV, N_INV), \
 X(vmax, 0x0000600, 0x0000f00, N_INV), \
 X(vmin, 0x0000610, 0x0200f00, N_INV), \
 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
 X(vadd, 0x0000800, 0x0000d00, N_INV), \
 X(vaddl, 0x0800000, N_INV, N_INV), \
 X(vsub, 0x1000800, 0x0200d00, N_INV), \
 X(vsubl, 0x0800200, N_INV, N_INV), \
 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
 /* Register variants of the following two instructions are encoded as
    vcge / vcgt with the operands reversed.  */ \
 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
 X(vfma, N_INV, 0x0000c10, N_INV), \
 X(vfms, N_INV, 0x0200c10, N_INV), \
 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
 X(vmlal, 0x0800800, N_INV, 0x0800240), \
 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
 X(vshl, 0x0000400, N_INV, 0x0800510), \
 X(vqshl, 0x0000410, N_INV, 0x0800710), \
 X(vand, 0x0000110, N_INV, 0x0800030), \
 X(vbic, 0x0100110, N_INV, 0x0800030), \
 X(veor, 0x1000110, N_INV, N_INV), \
 X(vorn, 0x0300110, N_INV, 0x0800010), \
 X(vorr, 0x0200110, N_INV, 0x0800010), \
 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate.  */ \
 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point.  */ \
 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar.  */ \
 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
 X(vst1, 0x0000000, 0x0800000, N_INV), \
 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
 X(vst2, 0x0000100, 0x0800100, N_INV), \
 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
 X(vst3, 0x0000200, 0x0800200, N_INV), \
 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
 X(vst4, 0x0000300, 0x0800300, N_INV), \
 X(vmovn, 0x1b20200, N_INV, N_INV), \
 X(vtrn, 0x1b20080, N_INV, N_INV), \
 X(vqmovn, 0x1b20200, N_INV, N_INV), \
 X(vqmovun, 0x1b20240, N_INV, N_INV), \
 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
 X(vseleq, 0xe000a00, N_INV, N_INV), \
 X(vselvs, 0xe100a00, N_INV, N_INV), \
 X(vselge, 0xe200a00, N_INV, N_INV), \
 X(vselgt, 0xe300a00, N_INV, N_INV), \
 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
 X(aes, 0x3b00300, N_INV, N_INV), \
 X(sha3op, 0x2000c00, N_INV, N_INV), \
 X(sha1h, 0x3b902c0, N_INV, N_INV), \
 X(sha2op, 0x3ba0380, N_INV, N_INV)
14633
/* Mnemonic indices into neon_enc_tab: one N_MNEM_* constant is generated
   per NEON_ENC_TAB row, in table order.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encoding table itself, indexed by enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
14647
/* Do not use these macros; instead, use NEON_ENCODE defined below.  Each
   takes the current instruction word (whose low 28 bits hold an
   enum neon_opc index at this stage) and returns the base encoding for
   the requested variant.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
/* The SINGLE/DOUBLE variants preserve the top condition nibble of X.  */
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction with the encoding of the given variant and mark
   the instruction as Neon (so check_neon_suffixes accepts a type suffix).  */
#define NEON_ENCODE(type, inst) \
  do \
    { \
      inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
      inst.is_neon = 1; \
    } \
  while (0)

/* Reject a Neon type suffix (e.g. ".s32") that was parsed on an
   instruction which never went through a Neon encoder.  */
#define check_neon_suffixes \
  do \
    { \
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
	{ \
	  as_bad (_("invalid neon suffix for non neon instruction")); \
	  return; \
	} \
    } \
  while (0)
14683
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     H - VFP S<n> register holding 16-bit (half-precision) data
	 (see the SE_H case in neon_select_shape)
     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF \
 X(4, (R, R, Q, Q), QUAD), \
 X(4, (Q, R, R, I), QUAD), \
 X(4, (R, R, S, S), QUAD), \
 X(4, (S, S, R, R), QUAD), \
 X(3, (Q, R, I), QUAD), \
 X(3, (I, Q, Q), QUAD), \
 X(3, (I, Q, R), QUAD), \
 X(3, (R, Q, Q), QUAD), \
 X(3, (D, D, D), DOUBLE), \
 X(3, (Q, Q, Q), QUAD), \
 X(3, (D, D, I), DOUBLE), \
 X(3, (Q, Q, I), QUAD), \
 X(3, (D, D, S), DOUBLE), \
 X(3, (Q, Q, S), QUAD), \
 X(3, (Q, Q, R), QUAD), \
 X(3, (R, R, Q), QUAD), \
 X(2, (R, Q), QUAD), \
 X(2, (D, D), DOUBLE), \
 X(2, (Q, Q), QUAD), \
 X(2, (D, S), DOUBLE), \
 X(2, (Q, S), QUAD), \
 X(2, (D, R), DOUBLE), \
 X(2, (Q, R), QUAD), \
 X(2, (D, I), DOUBLE), \
 X(2, (Q, I), QUAD), \
 X(3, (D, L, D), DOUBLE), \
 X(2, (D, Q), MIXED), \
 X(2, (Q, D), MIXED), \
 X(3, (D, Q, I), MIXED), \
 X(3, (Q, D, I), MIXED), \
 X(3, (Q, D, D), MIXED), \
 X(3, (D, Q, Q), MIXED), \
 X(3, (Q, Q, D), MIXED), \
 X(3, (Q, D, S), MIXED), \
 X(3, (D, Q, S), MIXED), \
 X(4, (D, D, D, I), DOUBLE), \
 X(4, (Q, Q, Q, I), QUAD), \
 X(4, (D, D, S, I), DOUBLE), \
 X(4, (Q, Q, S, I), QUAD), \
 X(2, (F, F), SINGLE), \
 X(3, (F, F, F), SINGLE), \
 X(2, (F, I), SINGLE), \
 X(2, (F, D), MIXED), \
 X(2, (D, F), MIXED), \
 X(3, (F, F, I), MIXED), \
 X(4, (R, R, F, F), SINGLE), \
 X(4, (F, F, R, R), SINGLE), \
 X(3, (D, R, R), DOUBLE), \
 X(3, (R, R, D), DOUBLE), \
 X(2, (S, R), SINGLE), \
 X(2, (R, S), SINGLE), \
 X(2, (F, R), SINGLE), \
 X(2, (R, F), SINGLE), \
/* Used for MVE tail predicated loop instructions.  */\
 X(2, (R, R), QUAD), \
/* Half float shape supported so far.  */\
 X (2, (H, D), MIXED), \
 X (2, (D, H), MIXED), \
 X (2, (H, F), MIXED), \
 X (2, (F, H), MIXED), \
 X (2, (H, H), HALF), \
 X (2, (H, R), HALF), \
 X (2, (R, H), HALF), \
 X (2, (H, I), HALF), \
 X (3, (H, H, H), HALF), \
 X (3, (H, F, I), MIXED), \
 X (3, (F, H, I), MIXED), \
 X (3, (D, H, H), MIXED), \
 X (3, (D, H, S), MIXED)
14771
/* Token-pasting helpers: each NEON_SHAPE_DEF row expands to one shape name
   such as NS_DDR, NS_QQQI, ...  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* All operand shapes, plus NS_NULL as list terminator / "no match".  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
14788
/* Coarse classification of a shape: which kind of register dominates it.
   Used e.g. by neon_quad to decide whether the Q bit should be set.  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Classification of each shape, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
14806
/* One element (operand position) of a shape: the register/operand kind
   denoted by the mnemonic characters documented above NEON_SHAPE_DEF.  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above, in bits (0 for the non-register kinds).  */
static unsigned neon_shape_el_size[] =
{
  16,	/* SE_H */
  32,	/* SE_F */
  64,	/* SE_D */
  128,	/* SE_Q */
  0,	/* SE_I */
  32,	/* SE_S */
  32,	/* SE_R */
  0	/* SE_L */
};
14831
/* Expanded form of one shape: its operand count and per-operand kinds.  */
struct neon_shape_info
{
  unsigned els;				/* Number of operands.  */
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];  /* Kind of each operand.  */
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Per-shape operand info, indexed by enum neon_shape; drives
   neon_select_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
14853
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_BF16 = 0x0400000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  /* NOTE: the modifier bits below deliberately reuse the values of
     N_S8..N_FLT above; they are only interpreted as modifiers when N_EQK
     is also set (see neon_modify_type_size).  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Convenience unions of the bits above, for common groups of allowed
   types.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)
#define N_I_MVE	   (N_I8 | N_I16 | N_I32)
#define N_F_MVE	   (N_F16 | N_F32)
#define N_SU_MVE   (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
14918
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.

   Reads the already-parsed operands in the global 'inst'; as a side effect,
   duplicates a missing optional operand 1 from operand 0.  The candidate
   list is a NS_NULL-terminated vararg sequence; the first shape whose every
   element matches the corresponding parsed operand is returned.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  /* A shape with more elements than we have operands can't match.  */
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	      /* Register lists are not checked here.  */
	    case SE_L:
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
15061
15062/* True if SHAPE is predominantly a quadword operation (most of the time, this
15063 means the Q bit should be set). */
15064
15065static int
15066neon_quad (enum neon_shape shape)
15067{
15068 return neon_shape_class[shape] == SC_QUAD;
15069}
15070
15071static void
15072neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
15073 unsigned *g_size)
15074{
15075 /* Allow modification to be made to types which are constrained to be
15076 based on the key element, based on bits set alongside N_EQK. */
15077 if ((typebits & N_EQK) != 0)
15078 {
15079 if ((typebits & N_HLF) != 0)
15080 *g_size /= 2;
15081 else if ((typebits & N_DBL) != 0)
15082 *g_size *= 2;
15083 if ((typebits & N_SGN) != 0)
15084 *g_type = NT_signed;
15085 else if ((typebits & N_UNS) != 0)
15086 *g_type = NT_unsigned;
15087 else if ((typebits & N_INT) != 0)
15088 *g_type = NT_integer;
15089 else if ((typebits & N_FLT) != 0)
15090 *g_type = NT_float;
15091 else if ((typebits & N_SIZ) != 0)
15092 *g_type = NT_untyped;
15093 }
15094}
15095
15096/* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
15097 operand type, i.e. the single type specified in a Neon instruction when it
15098 is the only one given. */
15099
15100static struct neon_type_el
15101neon_type_promote (struct neon_type_el *key, unsigned thisarg)
15102{
15103 struct neon_type_el dest = *key;
15104
15105 gas_assert ((thisarg & N_EQK) != 0);
15106
15107 neon_modify_type_size (thisarg, &dest.type, &dest.size);
15108
15109 return dest;
15110}
15111
15112/* Convert Neon type and size into compact bitmask representation. */
15113
15114static enum neon_type_mask
15115type_chk_of_el_type (enum neon_el_type type, unsigned size)
15116{
15117 switch (type)
15118 {
15119 case NT_untyped:
15120 switch (size)
15121 {
15122 case 8: return N_8;
15123 case 16: return N_16;
15124 case 32: return N_32;
15125 case 64: return N_64;
15126 default: ;
15127 }
15128 break;
15129
15130 case NT_integer:
15131 switch (size)
15132 {
15133 case 8: return N_I8;
15134 case 16: return N_I16;
15135 case 32: return N_I32;
15136 case 64: return N_I64;
15137 default: ;
15138 }
15139 break;
15140
15141 case NT_float:
15142 switch (size)
15143 {
15144 case 16: return N_F16;
15145 case 32: return N_F32;
15146 case 64: return N_F64;
15147 default: ;
15148 }
15149 break;
15150
15151 case NT_poly:
15152 switch (size)
15153 {
15154 case 8: return N_P8;
15155 case 16: return N_P16;
15156 case 64: return N_P64;
15157 default: ;
15158 }
15159 break;
15160
15161 case NT_signed:
15162 switch (size)
15163 {
15164 case 8: return N_S8;
15165 case 16: return N_S16;
15166 case 32: return N_S32;
15167 case 64: return N_S64;
15168 default: ;
15169 }
15170 break;
15171
15172 case NT_unsigned:
15173 switch (size)
15174 {
15175 case 8: return N_U8;
15176 case 16: return N_U16;
15177 case 32: return N_U32;
15178 case 64: return N_U64;
15179 default: ;
15180 }
15181 break;
15182
15183 case NT_bfloat:
15184 if (size == 16) return N_BF16;
15185 break;
15186
15187 default: ;
15188 }
15189
15190 return N_UTYP;
15191}
15192
15193/* Convert compact Neon bitmask type representation to a type and size. Only
15194 handles the case where a single bit is set in the mask. */
15195
15196static int
15197el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
15198 enum neon_type_mask mask)
15199{
15200 if ((mask & N_EQK) != 0)
15201 return FAIL;
15202
15203 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
15204 *size = 8;
15205 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16 | N_BF16))
15206 != 0)
15207 *size = 16;
15208 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
15209 *size = 32;
15210 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
15211 *size = 64;
15212 else
15213 return FAIL;
15214
15215 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
15216 *type = NT_signed;
15217 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
15218 *type = NT_unsigned;
15219 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
15220 *type = NT_integer;
15221 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
15222 *type = NT_untyped;
15223 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
15224 *type = NT_poly;
15225 else if ((mask & (N_F_ALL)) != 0)
15226 *type = NT_float;
15227 else if ((mask & (N_BF16)) != 0)
15228 *type = NT_bfloat;
15229 else
15230 return FAIL;
15231
15232 return SUCCESS;
15233}
15234
15235/* Modify a bitmask of allowed types. This is only needed for type
15236 relaxation. */
15237
15238static unsigned
15239modify_types_allowed (unsigned allowed, unsigned mods)
15240{
15241 unsigned size;
15242 enum neon_el_type type;
15243 unsigned destmask;
15244 int i;
15245
15246 destmask = 0;
15247
15248 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
15249 {
15250 if (el_type_of_type_chk (&type, &size,
15251 (enum neon_type_mask) (allowed & i)) == SUCCESS)
15252 {
15253 neon_modify_type_size (mods, &type, &size);
15254 destmask |= type_chk_of_el_type (type, size);
15255 }
15256 }
15257
15258 return destmask;
15259}
15260
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.

   ELS is the number of type arguments; NS the already-selected shape.  Each
   vararg is a neon_type_mask union of allowed/modifier bits; the one with
   N_KEY set names the key operand.  On error, sets inst.error (or calls
   first_error) and returns a type of NT_invtype.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A type suffix may appear on the mnemonic or on the operands, but
     not both.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes: pass 0 records the key element's type/size and allowed
     set; pass 1 checks every element against the (possibly key-relative)
     constraints.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  /* On pass 1, N_EQK arguments are checked against the key's
	     allowed set with the modifier bits applied.  */
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (BAD_SIMD_TYPE);
		      return badtype;
		    }
		}
	      else
		{
		  /* N_EQK operand: must equal the key's type/size after
		     applying any modifier bits.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
15470
15471/* Neon-style VFP instruction forwarding. */
15472
15473/* Thumb VFP instructions have 0xE in the condition field. */
15474
15475static void
15476do_vfp_cond_or_thumb (void)
15477{
15478 inst.is_neon = 1;
15479
15480 if (thumb_mode)
15481 inst.instruction |= 0xe0000000;
15482 else
15483 inst.instruction |= inst.cond << 28;
15484}
15485
15486/* Look up and encode a simple mnemonic, for use as a helper function for the
15487 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
15488 etc. It is assumed that operand parsing has already been done, and that the
15489 operands are in the form expected by the given opcode (this isn't necessarily
15490 the same as the form in which they were parsed, hence some massaging must
15491 take place before this function is called).
15492 Checks current arch version against that in the looked-up opcode. */
15493
15494static void
15495do_vfp_nsyn_opcode (const char *opname)
15496{
15497 const struct asm_opcode *opcode;
15498
15499 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
15500
15501 if (!opcode)
15502 abort ();
15503
15504 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
15505 thumb_mode ? *opcode->tvariant : *opcode->avariant),
15506 _(BAD_FPU));
15507
15508 inst.is_neon = 1;
15509
15510 if (thumb_mode)
15511 {
15512 inst.instruction = opcode->tvalue;
15513 opcode->tencode ();
15514 }
15515 else
15516 {
15517 inst.instruction = (inst.cond << 28) | opcode->avalue;
15518 opcode->aencode ();
15519 }
15520}
15521
15522static void
15523do_vfp_nsyn_add_sub (enum neon_shape rs)
15524{
15525 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
15526
15527 if (rs == NS_FFF || rs == NS_HHH)
15528 {
15529 if (is_add)
15530 do_vfp_nsyn_opcode ("fadds");
15531 else
15532 do_vfp_nsyn_opcode ("fsubs");
15533
15534 /* ARMv8.2 fp16 instruction. */
15535 if (rs == NS_HHH)
15536 do_scalar_fp16_v82_encode ();
15537 }
15538 else
15539 {
15540 if (is_add)
15541 do_vfp_nsyn_opcode ("faddd");
15542 else
15543 do_vfp_nsyn_opcode ("fsubd");
15544 }
15545}
15546
15547/* Check operand types to see if this is a VFP instruction, and if so call
15548 PFN (). */
15549
15550static int
15551try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
15552{
15553 enum neon_shape rs;
15554 struct neon_type_el et;
15555
15556 switch (args)
15557 {
15558 case 2:
15559 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
15560 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
15561 break;
15562
15563 case 3:
15564 rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15565 et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15566 N_F_ALL | N_KEY | N_VFP);
15567 break;
15568
15569 default:
15570 abort ();
15571 }
15572
15573 if (et.type != NT_invtype)
15574 {
15575 pfn (rs);
15576 return SUCCESS;
15577 }
15578
15579 inst.error = NULL;
15580 return FAIL;
15581}
15582
15583static void
15584do_vfp_nsyn_mla_mls (enum neon_shape rs)
15585{
15586 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
15587
15588 if (rs == NS_FFF || rs == NS_HHH)
15589 {
15590 if (is_mla)
15591 do_vfp_nsyn_opcode ("fmacs");
15592 else
15593 do_vfp_nsyn_opcode ("fnmacs");
15594
15595 /* ARMv8.2 fp16 instruction. */
15596 if (rs == NS_HHH)
15597 do_scalar_fp16_v82_encode ();
15598 }
15599 else
15600 {
15601 if (is_mla)
15602 do_vfp_nsyn_opcode ("fmacd");
15603 else
15604 do_vfp_nsyn_opcode ("fnmacd");
15605 }
15606}
15607
15608static void
15609do_vfp_nsyn_fma_fms (enum neon_shape rs)
15610{
15611 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
15612
15613 if (rs == NS_FFF || rs == NS_HHH)
15614 {
15615 if (is_fma)
15616 do_vfp_nsyn_opcode ("ffmas");
15617 else
15618 do_vfp_nsyn_opcode ("ffnmas");
15619
15620 /* ARMv8.2 fp16 instruction. */
15621 if (rs == NS_HHH)
15622 do_scalar_fp16_v82_encode ();
15623 }
15624 else
15625 {
15626 if (is_fma)
15627 do_vfp_nsyn_opcode ("ffmad");
15628 else
15629 do_vfp_nsyn_opcode ("ffnmad");
15630 }
15631}
15632
15633static void
15634do_vfp_nsyn_mul (enum neon_shape rs)
15635{
15636 if (rs == NS_FFF || rs == NS_HHH)
15637 {
15638 do_vfp_nsyn_opcode ("fmuls");
15639
15640 /* ARMv8.2 fp16 instruction. */
15641 if (rs == NS_HHH)
15642 do_scalar_fp16_v82_encode ();
15643 }
15644 else
15645 do_vfp_nsyn_opcode ("fmuld");
15646}
15647
15648static void
15649do_vfp_nsyn_abs_neg (enum neon_shape rs)
15650{
15651 int is_neg = (inst.instruction & 0x80) != 0;
15652 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
15653
15654 if (rs == NS_FF || rs == NS_HH)
15655 {
15656 if (is_neg)
15657 do_vfp_nsyn_opcode ("fnegs");
15658 else
15659 do_vfp_nsyn_opcode ("fabss");
15660
15661 /* ARMv8.2 fp16 instruction. */
15662 if (rs == NS_HH)
15663 do_scalar_fp16_v82_encode ();
15664 }
15665 else
15666 {
15667 if (is_neg)
15668 do_vfp_nsyn_opcode ("fnegd");
15669 else
15670 do_vfp_nsyn_opcode ("fabsd");
15671 }
15672}
15673
15674/* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
15675 insns belong to Neon, and are handled elsewhere. */
15676
15677static void
15678do_vfp_nsyn_ldm_stm (int is_dbmode)
15679{
15680 int is_ldm = (inst.instruction & (1 << 20)) != 0;
15681 if (is_ldm)
15682 {
15683 if (is_dbmode)
15684 do_vfp_nsyn_opcode ("fldmdbs");
15685 else
15686 do_vfp_nsyn_opcode ("fldmias");
15687 }
15688 else
15689 {
15690 if (is_dbmode)
15691 do_vfp_nsyn_opcode ("fstmdbs");
15692 else
15693 do_vfp_nsyn_opcode ("fstmias");
15694 }
15695}
15696
15697static void
15698do_vfp_nsyn_sqrt (void)
15699{
15700 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
15701 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
15702
15703 if (rs == NS_FF || rs == NS_HH)
15704 {
15705 do_vfp_nsyn_opcode ("fsqrts");
15706
15707 /* ARMv8.2 fp16 instruction. */
15708 if (rs == NS_HH)
15709 do_scalar_fp16_v82_encode ();
15710 }
15711 else
15712 do_vfp_nsyn_opcode ("fsqrtd");
15713}
15714
15715static void
15716do_vfp_nsyn_div (void)
15717{
15718 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15719 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15720 N_F_ALL | N_KEY | N_VFP);
15721
15722 if (rs == NS_FFF || rs == NS_HHH)
15723 {
15724 do_vfp_nsyn_opcode ("fdivs");
15725
15726 /* ARMv8.2 fp16 instruction. */
15727 if (rs == NS_HHH)
15728 do_scalar_fp16_v82_encode ();
15729 }
15730 else
15731 do_vfp_nsyn_opcode ("fdivd");
15732}
15733
15734static void
15735do_vfp_nsyn_nmul (void)
15736{
15737 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15738 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15739 N_F_ALL | N_KEY | N_VFP);
15740
15741 if (rs == NS_FFF || rs == NS_HHH)
15742 {
15743 NEON_ENCODE (SINGLE, inst);
15744 do_vfp_sp_dyadic ();
15745
15746 /* ARMv8.2 fp16 instruction. */
15747 if (rs == NS_HHH)
15748 do_scalar_fp16_v82_encode ();
15749 }
15750 else
15751 {
15752 NEON_ENCODE (DOUBLE, inst);
15753 do_vfp_dp_rd_rn_rm ();
15754 }
15755 do_vfp_cond_or_thumb ();
15756
15757}
15758
15759/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
15760 (0, 1, 2, 3). */
15761
static unsigned
neon_logbits (unsigned x)
{
  /* ffs () gives the one-based index of the least significant set bit, so
     the element sizes 8, 16, 32 and 64 map to 0, 1, 2 and 3.  */
  int lowest_set = ffs (x);
  return lowest_set - 4;
}
15767
15768#define LOW4(R) ((R) & 0xf)
15769#define HI1(R) (((R) >> 4) & 1)
15770
/* Map the condition code held in inst.operands[0].imm to the 4-bit "fcond"
   value used by the MVE VCMP/VPT encodings, validating it against the
   element type ET.  On an invalid type/condition combination an error is
   recorded via first_error and 0 is returned.  */
static unsigned
mve_get_vcmp_vpt_cond (struct neon_type_el et)
{
  switch (et.type)
    {
    default:
      first_error (BAD_EL_TYPE);
      return 0;
    case NT_float:
      /* Floating-point comparisons accept eq, ne, ge, lt, gt and le.  */
      switch (inst.operands[0].imm)
	{
	default:
	  first_error (_("invalid condition"));
	  return 0;
	case 0x0:
	  /* eq. */
	  return 0;
	case 0x1:
	  /* ne. */
	  return 1;
	case 0xa:
	  /* ge. */
	  return 4;
	case 0xb:
	  /* lt. */
	  return 5;
	case 0xc:
	  /* gt. */
	  return 6;
	case 0xd:
	  /* le. */
	  return 7;
	}
    case NT_integer:
      /* only accept eq and ne. */
      if (inst.operands[0].imm > 1)
	{
	  first_error (_("invalid condition"));
	  return 0;
	}
      return inst.operands[0].imm;
    case NT_unsigned:
      /* Unsigned comparisons: imm 0x2 and 0x8 map to fcond 2 and 3
	 (presumably the cs/hi conditions — standard ARM condition
	 numbering; confirm against the parser).  */
      if (inst.operands[0].imm == 0x2)
	return 2;
      else if (inst.operands[0].imm == 0x8)
	return 3;
      else
	{
	  first_error (_("invalid condition"));
	  return 0;
	}
    case NT_signed:
      /* Signed comparisons accept ge, lt, gt and le.  */
      switch (inst.operands[0].imm)
	{
	default:
	  first_error (_("invalid condition"));
	  return 0;
	case 0xa:
	  /* ge. */
	  return 4;
	case 0xb:
	  /* lt. */
	  return 5;
	case 0xc:
	  /* gt. */
	  return 6;
	case 0xd:
	  /* le. */
	  return 7;
	}
    }
  /* Should be unreachable. */
  abort ();
}
15845
15846/* For VCTP (create vector tail predicate) in MVE. */
static void
do_mve_vctp (void)
{
  int dt = 0;
  unsigned size = 0x0;

  /* Record whether we are inside a VPT block or standalone.  */
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* This is a typical MVE instruction which has no type but have size 8, 16,
     32 and 64. For instructions with no type, inst.vectype.el[j].type is set
     to NT_untyped and size is updated in inst.vectype.el[j].size. */
  if ((inst.operands[0].present) && (inst.vectype.el[0].type == NT_untyped))
    dt = inst.vectype.el[0].size;

  /* Setting this does not indicate an actual NEON instruction, but only
     indicates that the mnemonic accepts neon-style type suffixes. */
  inst.is_neon = 1;

  /* Translate the element size to the two-bit size field (bits 21:20);
     size 8 keeps the reset value 0.  */
  switch (dt)
    {
    case 8:
      break;
    case 16:
      size = 0x1; break;
    case 32:
      size = 0x2; break;
    case 64:
      size = 0x3; break;
    default:
      first_error (_("Type is not allowed for this instruction"));
    }
  inst.instruction |= size << 20;
  inst.instruction |= inst.operands[0].reg << 16;
}
15884
/* Encode an MVE VPT instruction and open the corresponding vector-predicated
   block.  With no operands it degenerates to just starting the block.  */
static void
do_mve_vpt (void)
{
  /* We are dealing with a vector predicated block. */
  if (inst.operands[0].present)
    {
      enum neon_shape rs = neon_select_shape (NS_IQQ, NS_IQR, NS_NULL);
      struct neon_type_el et
	= neon_check_type (3, rs, N_EQK, N_KEY | N_F_MVE | N_I_MVE | N_SU_32,
			   N_EQK);

      unsigned fcond = mve_get_vcmp_vpt_cond (et);

      constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

      if (et.type == NT_invtype)
	return;

      if (et.type == NT_float)
	{
	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
		      BAD_FPU);
	  constraint (et.size != 16 && et.size != 32, BAD_EL_TYPE);
	  /* Bit 28 set selects f16; bits 21:20 fixed for float.  */
	  inst.instruction |= (et.size == 16) << 28;
	  inst.instruction |= 0x3 << 20;
	}
      else
	{
	  constraint (et.size != 8 && et.size != 16 && et.size != 32,
		      BAD_EL_TYPE);
	  inst.instruction |= 1 << 28;
	  inst.instruction |= neon_logbits (et.size) << 20;
	}

      /* Second comparison operand: vector (Qm) or scalar (Rm) form.  The
	 fcond bits are scattered across the encoding.  */
      if (inst.operands[2].isquad)
	{
	  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
	  inst.instruction |= LOW4 (inst.operands[2].reg);
	  inst.instruction |= (fcond & 0x2) >> 1;
	}
      else
	{
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  inst.instruction |= 1 << 6;
	  inst.instruction |= (fcond & 0x2) << 4;
	  inst.instruction |= inst.operands[2].reg;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= (fcond & 0x4) << 10;
      inst.instruction |= (fcond & 0x1) << 7;

    }
  set_pred_insn_type (VPT_INSN);
  now_pred.cc = 0;
  /* Initial block mask: bit 22 of the encoding followed by bits 15:13.  */
  now_pred.mask = ((inst.instruction & 0x00400000) >> 19)
		  | ((inst.instruction & 0xe000) >> 13);
  now_pred.warn_deprecated = FALSE;
  now_pred.type = VECTOR_PRED;
  inst.is_neon = 1;
}
15946
/* Encode an MVE VCMP (vector compare with vector or scalar).  Also handles
   the "vcmpe" spelling produced by an else-conditional vcmp.  */
static void
do_mve_vcmp (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
  if (!inst.operands[1].isreg || !inst.operands[1].isquad)
    first_error (_(reg_expected_msgs[REG_TYPE_MQ]));
  if (!inst.operands[2].present)
    first_error (_("MVE vector or ARM register expected"));
  constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

  /* Deal with 'else' conditional MVE's vcmp, it will be parsed as vcmpe. */
  if ((inst.instruction & 0xffffffff) == N_MNEM_vcmpe
      && inst.operands[1].isquad)
    {
      inst.instruction = N_MNEM_vcmp;
      inst.cond = 0x10;
    }

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  enum neon_shape rs = neon_select_shape (NS_IQQ, NS_IQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_KEY | N_F_MVE | N_I_MVE | N_SU_32,
		       N_EQK);

  /* In the scalar form PC is only allowed in its zero-register spelling.  */
  constraint (rs == NS_IQR && inst.operands[2].reg == REG_PC
	      && !inst.operands[2].iszr, BAD_PC);

  unsigned fcond = mve_get_vcmp_vpt_cond (et);

  /* Base opcode; fcond is split over bits 12, 7 and (below) 5/0 or 9.  */
  inst.instruction = 0xee010f00;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= (fcond & 0x4) << 10;
  inst.instruction |= (fcond & 0x1) << 7;
  if (et.type == NT_float)
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
		  BAD_FPU);
      /* Bit 28 set selects f16; bits 21:20 fixed for float.  */
      inst.instruction |= (et.size == 16) << 28;
      inst.instruction |= 0x3 << 20;
    }
  else
    {
      inst.instruction |= 1 << 28;
      inst.instruction |= neon_logbits (et.size) << 20;
    }
  if (inst.operands[2].isquad)
    {
      /* Vector form: Qm.  */
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      inst.instruction |= (fcond & 0x2) >> 1;
      inst.instruction |= LOW4 (inst.operands[2].reg);
    }
  else
    {
      /* Scalar form: Rm (SP deprecated, warned only).  */
      if (inst.operands[2].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);
      inst.instruction |= 1 << 6;
      inst.instruction |= (fcond & 0x2) << 4;
      inst.instruction |= inst.operands[2].reg;
    }

  inst.is_neon = 1;
  return;
}
16014
16015static void
16016do_mve_vmaxa_vmina (void)
16017{
16018 if (inst.cond > COND_ALWAYS)
16019 inst.pred_insn_type = INSIDE_VPT_INSN;
16020 else
16021 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16022
16023 enum neon_shape rs = neon_select_shape (NS_QQ, NS_NULL);
16024 struct neon_type_el et
16025 = neon_check_type (2, rs, N_EQK, N_KEY | N_S8 | N_S16 | N_S32);
16026
16027 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16028 inst.instruction |= neon_logbits (et.size) << 18;
16029 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16030 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16031 inst.instruction |= LOW4 (inst.operands[1].reg);
16032 inst.is_neon = 1;
16033}
16034
16035static void
16036do_mve_vfmas (void)
16037{
16038 enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
16039 struct neon_type_el et
16040 = neon_check_type (3, rs, N_F_MVE | N_KEY, N_EQK, N_EQK);
16041
16042 if (inst.cond > COND_ALWAYS)
16043 inst.pred_insn_type = INSIDE_VPT_INSN;
16044 else
16045 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16046
16047 if (inst.operands[2].reg == REG_SP)
16048 as_tsktsk (MVE_BAD_SP);
16049 else if (inst.operands[2].reg == REG_PC)
16050 as_tsktsk (MVE_BAD_PC);
16051
16052 inst.instruction |= (et.size == 16) << 28;
16053 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16054 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16055 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16056 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16057 inst.instruction |= inst.operands[2].reg;
16058 inst.is_neon = 1;
16059}
16060
/* Encode the MVE VIDUP/VDDUP (increment/decrement and duplicate) and their
   wrapping variants VIWDUP/VDWDUP.  */
static void
do_mve_viddup (void)
{
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The step immediate must be a power of two between 1 and 8.  */
  unsigned imm = inst.relocs[0].exp.X_add_number;
  constraint (imm != 1 && imm != 2 && imm != 4 && imm != 8,
	      _("immediate must be either 1, 2, 4 or 8"));

  enum neon_shape rs;
  struct neon_type_el et;
  unsigned Rm;
  if (inst.instruction == M_MNEM_vddup || inst.instruction == M_MNEM_vidup)
    {
      /* Non-wrapping forms take no Rm; the field is fixed at 7.  */
      rs = neon_select_shape (NS_QRI, NS_NULL);
      et = neon_check_type (2, rs, N_KEY | N_U8 | N_U16 | N_U32, N_EQK);
      Rm = 7;
    }
  else
    {
      /* Wrapping forms take an odd-numbered Rm, encoded halved.  */
      constraint ((inst.operands[2].reg % 2) != 1, BAD_EVEN);
      if (inst.operands[2].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);
      else if (inst.operands[2].reg == REG_PC)
	first_error (BAD_PC);

      rs = neon_select_shape (NS_QRRI, NS_NULL);
      et = neon_check_type (3, rs, N_KEY | N_U8 | N_U16 | N_U32, N_EQK, N_EQK);
      Rm = inst.operands[2].reg >> 1;
    }
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* Immediate encoding: bit 7 is (imm > 2), bit 0 is (imm is 2 or 8).  */
  inst.instruction |= (imm > 2) << 7;
  inst.instruction |= Rm << 1;
  inst.instruction |= (imm == 2 || imm == 8);
  inst.is_neon = 1;
}
16103
16104static void
16105do_mve_vmlas (void)
16106{
16107 enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
16108 struct neon_type_el et
16109 = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
16110
16111 if (inst.operands[2].reg == REG_PC)
16112 as_tsktsk (MVE_BAD_PC);
16113 else if (inst.operands[2].reg == REG_SP)
16114 as_tsktsk (MVE_BAD_SP);
16115
16116 if (inst.cond > COND_ALWAYS)
16117 inst.pred_insn_type = INSIDE_VPT_INSN;
16118 else
16119 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16120
16121 inst.instruction |= (et.type == NT_unsigned) << 28;
16122 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16123 inst.instruction |= neon_logbits (et.size) << 20;
16124 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16125 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16126 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16127 inst.instruction |= inst.operands[2].reg;
16128 inst.is_neon = 1;
16129}
16130
/* Encode MVE VSHLL (vector shift left long).  A shift equal to the element
   size uses a different encoding from a shift smaller than it.  */
static void
do_mve_vshll (void)
{
  struct neon_type_el et
    = neon_check_type (2, NS_QQI, N_EQK, N_S8 | N_U8 | N_S16 | N_U16 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The shift amount must be in [1, element size].  */
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate value out of range"));

  if ((unsigned)imm == et.size)
    {
      /* Full-width shift: size goes in bits 19:18 of this variant.  */
      inst.instruction |= neon_logbits (et.size) << 18;
      inst.instruction |= 0x110001;
    }
  else
    {
      /* Partial shift: size + imm encoded together in bits 21:16.  */
      inst.instruction |= (et.size + imm) << 16;
      inst.instruction |= 0x800140;
    }

  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.is_neon = 1;
}
16164
16165static void
16166do_mve_vshlc (void)
16167{
16168 if (inst.cond > COND_ALWAYS)
16169 inst.pred_insn_type = INSIDE_VPT_INSN;
16170 else
16171 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16172
16173 if (inst.operands[1].reg == REG_PC)
16174 as_tsktsk (MVE_BAD_PC);
16175 else if (inst.operands[1].reg == REG_SP)
16176 as_tsktsk (MVE_BAD_SP);
16177
16178 int imm = inst.operands[2].imm;
16179 constraint (imm < 1 || imm > 32, _("immediate value out of range"));
16180
16181 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16182 inst.instruction |= (imm & 0x1f) << 16;
16183 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16184 inst.instruction |= inst.operands[1].reg;
16185 inst.is_neon = 1;
16186}
16187
/* Encode the MVE narrowing right-shift family (VSHRN/VRSHRN/VQSHRN/
   VQRSHRN/VQSHRUN/VQRSHRUN, top and bottom variants).  The mnemonic
   determines which element types are permitted.  */
static void
do_mve_vshrn (void)
{
  unsigned types;
  switch (inst.instruction)
    {
    case M_MNEM_vshrnt:
    case M_MNEM_vshrnb:
    case M_MNEM_vrshrnt:
    case M_MNEM_vrshrnb:
      types = N_I16 | N_I32;
      break;
    case M_MNEM_vqshrnt:
    case M_MNEM_vqshrnb:
    case M_MNEM_vqrshrnt:
    case M_MNEM_vqrshrnb:
      types = N_U16 | N_U32 | N_S16 | N_S32;
      break;
    case M_MNEM_vqshrunt:
    case M_MNEM_vqshrunb:
    case M_MNEM_vqrshrunt:
    case M_MNEM_vqrshrunb:
      types = N_S16 | N_S32;
      break;
    default:
      abort ();
    }

  struct neon_type_el et = neon_check_type (2, NS_QQI, N_EQK, types | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  unsigned Qd = inst.operands[0].reg;
  unsigned Qm = inst.operands[1].reg;
  unsigned imm = inst.operands[2].imm;
  /* The shift amount is bounded by half the (source) element size.  */
  constraint (imm < 1 || ((unsigned) imm) > (et.size / 2),
	      et.size == 16
	      ? _("immediate operand expected in the range [1,8]")
	      : _("immediate operand expected in the range [1,16]"));

  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= HI1 (Qd) << 22;
  /* The shift is encoded as (size - imm) in bits 21:16.  */
  inst.instruction |= (et.size - imm) << 16;
  inst.instruction |= LOW4 (Qd) << 12;
  inst.instruction |= HI1 (Qm) << 5;
  inst.instruction |= LOW4 (Qm);
  inst.is_neon = 1;
}
16239
16240static void
16241do_mve_vqmovn (void)
16242{
16243 struct neon_type_el et;
16244 if (inst.instruction == M_MNEM_vqmovnt
16245 || inst.instruction == M_MNEM_vqmovnb)
16246 et = neon_check_type (2, NS_QQ, N_EQK,
16247 N_U16 | N_U32 | N_S16 | N_S32 | N_KEY);
16248 else
16249 et = neon_check_type (2, NS_QQ, N_EQK, N_S16 | N_S32 | N_KEY);
16250
16251 if (inst.cond > COND_ALWAYS)
16252 inst.pred_insn_type = INSIDE_VPT_INSN;
16253 else
16254 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16255
16256 inst.instruction |= (et.type == NT_unsigned) << 28;
16257 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16258 inst.instruction |= (et.size == 32) << 18;
16259 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16260 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16261 inst.instruction |= LOW4 (inst.operands[1].reg);
16262 inst.is_neon = 1;
16263}
16264
16265static void
16266do_mve_vpsel (void)
16267{
16268 neon_select_shape (NS_QQQ, NS_NULL);
16269
16270 if (inst.cond > COND_ALWAYS)
16271 inst.pred_insn_type = INSIDE_VPT_INSN;
16272 else
16273 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16274
16275 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16276 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16277 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16278 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16279 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16280 inst.instruction |= LOW4 (inst.operands[2].reg);
16281 inst.is_neon = 1;
16282}
16283
16284static void
16285do_mve_vpnot (void)
16286{
16287 if (inst.cond > COND_ALWAYS)
16288 inst.pred_insn_type = INSIDE_VPT_INSN;
16289 else
16290 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16291}
16292
16293static void
16294do_mve_vmaxnma_vminnma (void)
16295{
16296 enum neon_shape rs = neon_select_shape (NS_QQ, NS_NULL);
16297 struct neon_type_el et
16298 = neon_check_type (2, rs, N_EQK, N_F_MVE | N_KEY);
16299
16300 if (inst.cond > COND_ALWAYS)
16301 inst.pred_insn_type = INSIDE_VPT_INSN;
16302 else
16303 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16304
16305 inst.instruction |= (et.size == 16) << 28;
16306 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16307 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16308 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16309 inst.instruction |= LOW4 (inst.operands[1].reg);
16310 inst.is_neon = 1;
16311}
16312
/* Encode MVE VCMUL (vector complex multiply with rotation 0/90/180/270).  */
static void
do_mve_vcmul (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQI, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_F_MVE | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* Only the four quarter-turn rotations are valid.  */
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));

  /* For f32, the destination overlapping a source is deprecated.  */
  if (et.size == 32 && (inst.operands[0].reg == inst.operands[1].reg
			|| inst.operands[0].reg == inst.operands[2].reg))
    as_tsktsk (BAD_MVE_SRCDEST);

  inst.instruction |= (et.size == 32) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* Rotation is encoded as bit 12 (rot > 90) and bit 0 (rot 90 or 270).  */
  inst.instruction |= (rot > 90) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= (rot == 90 || rot == 270);
  inst.is_neon = 1;
}
16344
16345/* To handle the Low Overhead Loop instructions
16346 in Armv8.1-M Mainline and MVE. */
static void
do_t_loloop (void)
{
  /* Remember the pseudo-opcode before it is replaced by the real one.  */
  unsigned long insn = inst.instruction;

  /* All low-overhead-loop insns use the 32-bit Thumb encoding.  */
  inst.instruction = THUMB_OP32 (inst.instruction);

  /* LCTP takes no operands and needs nothing further.  */
  if (insn == T_MNEM_lctp)
    return;

  set_pred_insn_type (MVE_OUTSIDE_PRED_INSN);

  /* The tail-predicated forms carry an element-size type suffix.  */
  if (insn == T_MNEM_wlstp || insn == T_MNEM_dlstp)
    {
      struct neon_type_el et
       = neon_check_type (2, NS_RR, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
      inst.instruction |= neon_logbits (et.size) << 20;
      inst.is_neon = 1;
    }

  switch (insn)
    {
    case T_MNEM_letp:
      /* LETP requires the explicit LR operand.  */
      constraint (!inst.operands[0].present,
		  _("expected LR"));
      /* fall through. */
    case T_MNEM_le:
      /* le <label>. */
      if (!inst.operands[0].present)
	inst.instruction |= 1 << 21;

      v8_1_loop_reloc (TRUE);
      break;

    case T_MNEM_wls:
    case T_MNEM_wlstp:
      v8_1_loop_reloc (FALSE);
      /* fall through. */
    case T_MNEM_dlstp:
    case T_MNEM_dls:
      constraint (inst.operands[1].isreg != 1, BAD_ARGS);

      /* Tail-predicated forms forbid PC outright; plain DLS/WLS merely
	 deprecate it.  SP is deprecated for all of them.  */
      if (insn == T_MNEM_wlstp || insn == T_MNEM_dlstp)
	constraint (inst.operands[1].reg == REG_PC, BAD_PC);
      else if (inst.operands[1].reg == REG_PC)
	as_tsktsk (MVE_BAD_PC);
      if (inst.operands[1].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);

      inst.instruction |= (inst.operands[1].reg << 16);
      break;

    default:
      abort ();
    }
}
16403
16404
/* Encode a Neon-syntax VFP compare (vcmp/vcmpe), dispatching to MVE VCMP
   when the first operand is not a VFP register, and switching to the
   compare-with-zero pseudo-mnemonics when the second operand is an
   immediate.  */
static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  /* A non-register first operand means this is really an MVE VCMP.  */
  if (!inst.operands[0].isreg)
    {
      do_mve_vcmp ();
      return;
    }
  else
    {
      constraint (inst.operands[2].present, BAD_SYNTAX);
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd),
		  BAD_FPU);
    }

  if (inst.operands[1].isreg)
    {
      /* Register-register compare.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare with zero: rewrite to the "z" pseudo-mnemonic.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction. */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
16471
16472static void
16473nsyn_insert_sp (void)
16474{
16475 inst.operands[1] = inst.operands[0];
16476 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
16477 inst.operands[0].reg = REG_SP;
16478 inst.operands[0].isreg = 1;
16479 inst.operands[0].writeback = 1;
16480 inst.operands[0].present = 1;
16481}
16482
16483static void
16484do_vfp_nsyn_push (void)
16485{
16486 nsyn_insert_sp ();
16487
16488 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
16489 _("register list must contain at least 1 and at most 16 "
16490 "registers"));
16491
16492 if (inst.operands[1].issingle)
16493 do_vfp_nsyn_opcode ("fstmdbs");
16494 else
16495 do_vfp_nsyn_opcode ("fstmdbd");
16496}
16497
16498static void
16499do_vfp_nsyn_pop (void)
16500{
16501 nsyn_insert_sp ();
16502
16503 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
16504 _("register list must contain at least 1 and at most 16 "
16505 "registers"));
16506
16507 if (inst.operands[1].issingle)
16508 do_vfp_nsyn_opcode ("fldmias");
16509 else
16510 do_vfp_nsyn_opcode ("fldmiad");
16511}
16512
16513/* Fix up Neon data-processing instructions, ORing in the correct bits for
16514 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
16515
16516static void
16517neon_dp_fixup (struct arm_it* insn)
16518{
16519 unsigned int i = insn->instruction;
16520 insn->is_neon = 1;
16521
16522 if (thumb_mode)
16523 {
16524 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
16525 if (i & (1 << 24))
16526 i |= 1 << 28;
16527
16528 i &= ~(1 << 24);
16529
16530 i |= 0xef000000;
16531 }
16532 else
16533 i |= 0xf2000000;
16534
16535 insn->instruction = i;
16536}
16537
16538static void
16539mve_encode_qqr (int size, int U, int fp)
16540{
16541 if (inst.operands[2].reg == REG_SP)
16542 as_tsktsk (MVE_BAD_SP);
16543 else if (inst.operands[2].reg == REG_PC)
16544 as_tsktsk (MVE_BAD_PC);
16545
16546 if (fp)
16547 {
16548 /* vadd. */
16549 if (((unsigned)inst.instruction) == 0xd00)
16550 inst.instruction = 0xee300f40;
16551 /* vsub. */
16552 else if (((unsigned)inst.instruction) == 0x200d00)
16553 inst.instruction = 0xee301f40;
16554 /* vmul. */
16555 else if (((unsigned)inst.instruction) == 0x1000d10)
16556 inst.instruction = 0xee310e60;
16557
16558 /* Setting size which is 1 for F16 and 0 for F32. */
16559 inst.instruction |= (size == 16) << 28;
16560 }
16561 else
16562 {
16563 /* vadd. */
16564 if (((unsigned)inst.instruction) == 0x800)
16565 inst.instruction = 0xee010f40;
16566 /* vsub. */
16567 else if (((unsigned)inst.instruction) == 0x1000800)
16568 inst.instruction = 0xee011f40;
16569 /* vhadd. */
16570 else if (((unsigned)inst.instruction) == 0)
16571 inst.instruction = 0xee000f40;
16572 /* vhsub. */
16573 else if (((unsigned)inst.instruction) == 0x200)
16574 inst.instruction = 0xee001f40;
16575 /* vmla. */
16576 else if (((unsigned)inst.instruction) == 0x900)
16577 inst.instruction = 0xee010e40;
16578 /* vmul. */
16579 else if (((unsigned)inst.instruction) == 0x910)
16580 inst.instruction = 0xee011e60;
16581 /* vqadd. */
16582 else if (((unsigned)inst.instruction) == 0x10)
16583 inst.instruction = 0xee000f60;
16584 /* vqsub. */
16585 else if (((unsigned)inst.instruction) == 0x210)
16586 inst.instruction = 0xee001f60;
16587 /* vqrdmlah. */
16588 else if (((unsigned)inst.instruction) == 0x3000b10)
16589 inst.instruction = 0xee000e40;
16590 /* vqdmulh. */
16591 else if (((unsigned)inst.instruction) == 0x0000b00)
16592 inst.instruction = 0xee010e60;
16593 /* vqrdmulh. */
16594 else if (((unsigned)inst.instruction) == 0x1000b00)
16595 inst.instruction = 0xfe010e60;
16596
16597 /* Set U-bit. */
16598 inst.instruction |= U << 28;
16599
16600 /* Setting bits for size. */
16601 inst.instruction |= neon_logbits (size) << 20;
16602 }
16603 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16604 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16605 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16606 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16607 inst.instruction |= inst.operands[2].reg;
16608 inst.is_neon = 1;
16609}
16610
16611static void
16612mve_encode_rqq (unsigned bit28, unsigned size)
16613{
16614 inst.instruction |= bit28 << 28;
16615 inst.instruction |= neon_logbits (size) << 20;
16616 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16617 inst.instruction |= inst.operands[0].reg << 12;
16618 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16619 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16620 inst.instruction |= LOW4 (inst.operands[2].reg);
16621 inst.is_neon = 1;
16622}
16623
16624static void
16625mve_encode_qqq (int ubit, int size)
16626{
16627
16628 inst.instruction |= (ubit != 0) << 28;
16629 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16630 inst.instruction |= neon_logbits (size) << 20;
16631 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16632 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16633 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16634 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16635 inst.instruction |= LOW4 (inst.operands[2].reg);
16636
16637 inst.is_neon = 1;
16638}
16639
16640static void
16641mve_encode_rq (unsigned bit28, unsigned size)
16642{
16643 inst.instruction |= bit28 << 28;
16644 inst.instruction |= neon_logbits (size) << 18;
16645 inst.instruction |= inst.operands[0].reg << 12;
16646 inst.instruction |= LOW4 (inst.operands[1].reg);
16647 inst.is_neon = 1;
16648}
16649
/* Encode an MVE r/r/q/q-shaped instruction.  U goes into bit 28; operand 1
   (a general register, halved) into bits 22-20; operand 2 (vector) into the
   Vn/N fields; a flag for SIZE == 32 into bit 16; operand 0 (general
   register) into bits 15-12; operand 3 (vector) into bits 3-0.
   NOTE(review): the exact operand roles (e.g. RdaLo/RdaHi pair) are not
   visible here — confirm against the Armv8.1-M encoding tables.  */
static void
mve_encode_rrqq (unsigned U, unsigned size)
{
  /* Operand 3 must be an MVE vector register (Q0-Q7).  */
  constraint (inst.operands[3].reg > 14, MVE_BAD_QREG);

  inst.instruction |= U << 28;
  inst.instruction |= (inst.operands[1].reg >> 1) << 20;
  inst.instruction |= LOW4 (inst.operands[2].reg) << 16;
  /* Bit 16 doubles as the size flag on top of the Vn field.  */
  inst.instruction |= (size == 32) << 16;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= HI1 (inst.operands[2].reg) << 7;
  inst.instruction |= inst.operands[3].reg;
  inst.is_neon = 1;
}
16664
16665/* Helper function for neon_three_same handling the operands. */
16666static void
16667neon_three_args (int isquad)
16668{
16669 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16670 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16671 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16672 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16673 inst.instruction |= LOW4 (inst.operands[2].reg);
16674 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16675 inst.instruction |= (isquad != 0) << 6;
16676 inst.is_neon = 1;
16677}
16678
/* Encode insns with bit pattern:

  |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
  | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |

  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
  different meaning for some instruction. */

static void
neon_three_same (int isquad, int ubit, int size)
{
  /* Registers (Rd/Rn/Rm with their high bits) and the Q bit.  */
  neon_three_args (isquad);
  /* U bit (bit 24; becomes 28 after the Thumb fixup below).  */
  inst.instruction |= (ubit != 0) << 24;
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
16697
/* Encode instructions of the form:

  |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
  | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |

  Don't write size if SIZE == -1. */

static void
neon_two_same (int qbit, int ubit, int size)
{
  /* Rd and its high bit D (operand 0).  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Rm and its high bit M (operand 1).  */
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  neon_dp_fixup (&inst);
}
16720
/* OR-able flags telling vfp_or_neon_is_neon which checks to perform.  */
enum vfp_or_neon_is_neon_bits
{
NEON_CHECK_CC = 1,	/* Check/fix up the condition code field.  */
NEON_CHECK_ARCH = 2,	/* Require the Neon v1 feature bit.  */
NEON_CHECK_ARCH8 = 4	/* Require the Armv8 Neon feature bit.  */
};
16727
/* Call this function if an instruction which may have belonged to the VFP or
   Neon instruction sets, but turned out to be a Neon instruction (due to the
   operand types involved, etc.). We have to check and/or fix-up a couple of
   things:

     - Make sure the user hasn't attempted to make a Neon instruction
       conditional.
     - Alter the value in the condition code field if necessary.
     - Make sure that the arch supports Neon instructions.

   Which of these operations take place depends on bits from enum
   vfp_or_neon_is_neon_bits.

   WARNING: This function has side effects! If NEON_CHECK_CC is used and the
   current instruction's condition is COND_ALWAYS, the condition field is
   changed to inst.uncond_value. This is necessary because instructions shared
   between VFP and Neon may be conditional for the VFP variants only, and the
   unconditional Neon version must have, e.g., 0xF in the condition field. */

static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks). */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      /* Force the unconditional encoding (see WARNING above).  */
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }


  /* Check (and mark as used) the required architecture feature bits.  */
  if (((check & NEON_CHECK_ARCH) && !mark_feature_used (&fpu_neon_ext_v1))
      || ((check & NEON_CHECK_ARCH8)
	  && !mark_feature_used (&fpu_neon_ext_armv8)))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
16773
16774
/* Return TRUE if the SIMD instruction is available for the current
   cpu_variant. FP is set to TRUE if this is a SIMD floating-point
   instruction. CHECK contains the set of bits to pass to
   vfp_or_neon_is_neon for the NEON specific checks. */

static bfd_boolean
check_simd_pred_availability (int fp, unsigned check)
{
  /* A VPT predication suffix (cond > COND_ALWAYS) is only valid for MVE.  */
  if (inst.cond > COND_ALWAYS)
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  inst.error = BAD_FPU;
	  return FALSE;
	}
      inst.pred_insn_type = INSIDE_VPT_INSN;
    }
  /* A scalar condition code: fine for MVE (outside a VPT block), otherwise
     defer to the Neon condition-code handling.  */
  else if (inst.cond < COND_ALWAYS)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
      else if (vfp_or_neon_is_neon (check) == FAIL)
	return FALSE;
    }
  /* Unconditional: require either the relevant MVE feature (integer or
     floating-point, per FP) or the Neon features named in CHECK.  */
  else
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, fp ? mve_fp_ext : mve_ext)
	  && vfp_or_neon_is_neon (check) == FAIL)
	return FALSE;

      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
    }
  return TRUE;
}
16810
16811/* Neon instruction encoders, in approximate order of appearance. */
16812
/* Dyadic operations on signed/unsigned 8/16/32-bit integer elements
   (N_SU_32).  Selects the MVE shapes (QQQ/QQR) when MVE is available,
   otherwise the Neon shapes (DDD/QQQ), and dispatches to the vector+scalar
   (QQR) or three-same encoder accordingly.  */
static void
do_neon_dyadic_i_su (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
  else
    rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);

  et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY);


  if (rs != NS_QQR)
    neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
  else
    mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
}
16834
/* Like do_neon_dyadic_i_su but also allowing 64-bit elements on Neon
   (N_SU_ALL) and the MVE element set (N_SU_MVE) on MVE.  */
static void
do_neon_dyadic_i64_su (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;
  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_ALL | N_KEY);
    }
  /* Vector + scalar (QQR) goes through the MVE encoder, everything else
     through the generic three-same encoder.  */
  if (rs == NS_QQR)
    mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
  else
    neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}
16857
/* Encode a Neon immediate-shift instruction.  ET gives the element type;
   IMMBITS is the (already biased) immediate field; ISQUAD selects the Q
   bit; the U bit (UVAL) is only written when WRITE_UBIT is set.  */
static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  /* Element size in bytes.  */
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  /* Bit 7 is set only for 64-bit elements (size == 8 bytes).  */
  inst.instruction |= (size >> 3) << 7;
  /* Size bits overlay the top of the immediate field (bits 21-19).  */
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
16876
/* VSHL: immediate form (shift by constant), MVE vector-by-scalar (QQR)
   form, and the register form (note the operand order swap below).  */
static void
do_neon_shl (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  if (!inst.operands[2].isreg)
    {
      /* Shift by immediate.  */
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_MVE);
	}
      else
	{
	  rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
	}
      int imm = inst.operands[2].imm;

      /* The shift amount must be strictly less than the element size.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_MVE | N_KEY, N_EQK | N_EQK);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
	}


      if (rs == NS_QQR)
	{
	  /* MVE vector shifted by a general register.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("invalid instruction shape"));
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  inst.instruction = 0xee311e60;
	  inst.instruction |= (et.type == NT_unsigned) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= neon_logbits (et.size) << 18;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	}
      else
	{
	  unsigned int tmp;

	  /* VSHL/VQSHL 3-register variants have syntax such as:
	       vshl.xx Dd, Dm, Dn
	     whereas other 3-register operations encoded by neon_three_same have
	     syntax like:
	       vadd.xx Dd, Dn, Dm
	     (i.e. with Dn & Dm reversed). Swap operands[1].reg and
	     operands[2].reg here. */
	  tmp = inst.operands[2].reg;
	  inst.operands[2].reg = inst.operands[1].reg;
	  inst.operands[1].reg = tmp;
	  NEON_ENCODE (INTEGER, inst);
	  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
	}
    }
}
16956
/* VQSHL: same overall structure as do_neon_shl (immediate, MVE QQR and
   register forms), but the immediate form writes the U bit and the QQR
   form uses a different base opcode.  */
static void
do_neon_qshl (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  if (!inst.operands[2].isreg)
    {
      /* Shift by immediate.  */
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_SU_MVE);
	}
      else
	{
	  rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
	}
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs;
      struct neon_type_el et;

      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_MVE | N_KEY, N_EQK | N_EQK);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
	}

      if (rs == NS_QQR)
	{
	  /* MVE vector saturating-shift by a general register.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("invalid instruction shape"));
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  inst.instruction = 0xee311ee0;
	  inst.instruction |= (et.type == NT_unsigned) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= neon_logbits (et.size) << 18;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	}
      else
	{
	  unsigned int tmp;

	  /* See note in do_neon_shl. */
	  tmp = inst.operands[2].reg;
	  inst.operands[2].reg = inst.operands[1].reg;
	  inst.operands[1].reg = tmp;
	  NEON_ENCODE (INTEGER, inst);
	  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
	}
    }
}
17030
/* VRSHL/VQRSHL (rounding shifts); shares one encoder since the two
   mnemonics are distinguished by the incoming opcode bits.  */
static void
do_neon_rshl (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_ALL | N_KEY);
    }

  unsigned int tmp;

  if (rs == NS_QQR)
    {
      /* MVE vector shifted by a general register.  */
      if (inst.operands[2].reg == REG_PC)
	as_tsktsk (MVE_BAD_PC);
      else if (inst.operands[2].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);

      constraint (inst.operands[0].reg != inst.operands[1].reg,
		  _("invalid instruction shape"));

      if (inst.instruction == 0x0000510)
	/* We are dealing with vqrshl. */
	inst.instruction = 0xee331ee0;
      else
	/* We are dealing with vrshl. */
	inst.instruction = 0xee331e60;

      inst.instruction |= (et.type == NT_unsigned) << 28;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= neon_logbits (et.size) << 18;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= inst.operands[2].reg;
      inst.is_neon = 1;
    }
  else
    {
      /* Register form: swap Vn/Vm as in do_neon_shl.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
17084
17085static int
17086neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
17087{
17088 /* Handle .I8 pseudo-instructions. */
17089 if (size == 8)
17090 {
17091 /* Unfortunately, this will make everything apart from zero out-of-range.
17092 FIXME is this the intended semantics? There doesn't seem much point in
17093 accepting .I8 if so. */
17094 immediate |= immediate << 8;
17095 size = 16;
17096 }
17097
17098 if (size >= 32)
17099 {
17100 if (immediate == (immediate & 0x000000ff))
17101 {
17102 *immbits = immediate;
17103 return 0x1;
17104 }
17105 else if (immediate == (immediate & 0x0000ff00))
17106 {
17107 *immbits = immediate >> 8;
17108 return 0x3;
17109 }
17110 else if (immediate == (immediate & 0x00ff0000))
17111 {
17112 *immbits = immediate >> 16;
17113 return 0x5;
17114 }
17115 else if (immediate == (immediate & 0xff000000))
17116 {
17117 *immbits = immediate >> 24;
17118 return 0x7;
17119 }
17120 if ((immediate & 0xffff) != (immediate >> 16))
17121 goto bad_immediate;
17122 immediate &= 0xffff;
17123 }
17124
17125 if (immediate == (immediate & 0x000000ff))
17126 {
17127 *immbits = immediate;
17128 return 0x9;
17129 }
17130 else if (immediate == (immediate & 0x0000ff00))
17131 {
17132 *immbits = immediate >> 8;
17133 return 0xb;
17134 }
17135
17136 bad_immediate:
17137 first_error (_("immediate value out of range"));
17138 return FAIL;
17139}
17140
/* Encode the Neon/MVE bitwise logic instructions.  Register forms go
   through neon_three_same; immediate forms encode VBIC/VORR directly and
   treat VAND/VORN as pseudo-instructions by inverting the immediate (see
   the switch below).  */
static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      /* Three-register form.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      if (rs == NS_QQQ
	  && !check_simd_pred_availability (FALSE,
					    NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
      else if (rs != NS_QQQ
	       && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	first_error (BAD_FPU);

      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask. */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form; operand 1 may have been omitted.  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      /* Because neon_select_shape makes the second operand a copy of the first
	 if the second operand is not present. */
      if (rs == NS_QQI
	  && !check_simd_pred_availability (FALSE,
					    NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
      else if (rs != NS_QQI
	       && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	first_error (BAD_FPU);

      struct neon_type_el et;
      /* MVE restricts the immediate forms to 16- and 32-bit elements.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	et = neon_check_type (2, rs, N_I32 | N_I16 | N_KEY, N_EQK);
      else
	et = neon_check_type (2, rs, N_I8 | N_I16 | N_I32 | N_I64 | N_F32
			      | N_KEY, N_EQK);

      if (et.type == NT_invtype)
	return;
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;


      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern. */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant. */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC. */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR. */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
17249
/* Bitfield-style three-register operations (types of the operands are
   ignored; the opcode bits fully determine the operation).  */
static void
do_neon_bitfield (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_IGNORE_TYPE);
  neon_three_same (neon_quad (rs), 0, -1);
}
17257
17258static void
17259neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
17260 unsigned destbits)
17261{
17262 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
17263 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
17264 types | N_KEY);
17265 if (et.type == NT_float)
17266 {
17267 NEON_ENCODE (FLOAT, inst);
17268 if (rs == NS_QQR)
17269 mve_encode_qqr (et.size, 0, 1);
17270 else
17271 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
17272 }
17273 else
17274 {
17275 NEON_ENCODE (INTEGER, inst);
17276 if (rs == NS_QQR)
17277 mve_encode_qqr (et.size, et.type == ubit_meaning, 0);
17278 else
17279 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
17280 }
17281}
17282
17283
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here. */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
17291
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args. */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
17299
/* Encode the MVE VSTR/VLDR [Qn, #imm]{!} (vector base + immediate offset)
   addressing form.  SIZE is the access size in bits (32 or 64 only),
   ELSIZE the element size from the type suffix, LOAD non-zero for VLDR.  */
static void
do_mve_vstr_vldr_QI (int size, int elsize, int load)
{
  constraint (size < 32, BAD_ADDR_MODE);
  constraint (size != elsize, BAD_EL_TYPE);
  constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
  constraint (!inst.operands[1].preind, BAD_ADDR_MODE);
  constraint (load && inst.operands[0].reg == inst.operands[1].reg,
	      _("destination register and offset register may not be the"
		" same"));

  /* Split the signed offset into an add/subtract flag and a magnitude.  */
  int imm = inst.relocs[0].exp.X_add_number;
  int add = 1;
  if (imm < 0)
    {
      add = 0;
      imm = -imm;
    }
  /* Offset must be a multiple of the access size and fit in 7 scaled
     bits.  */
  constraint ((imm % (size / 8) != 0)
	      || imm > (0x7f << neon_logbits (size)),
	      (size == 32) ? _("immediate must be a multiple of 4 in the"
			       " range of +/-[0,508]")
			   : _("immediate must be a multiple of 8 in the"
			       " range of +/-[0,1016]"));
  inst.instruction |= 0x11 << 24;
  inst.instruction |= add << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= 1 << 12;
  inst.instruction |= (size == 64) << 8;
  /* Clear the low byte before inserting the scaled offset.  */
  inst.instruction &= 0xffffff00;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= imm >> neon_logbits (size);
}
17336
/* Encode the MVE VSTR/VLDR [Rn, Qm, {UXTW #os}] (scalar base + vector
   offset) addressing form.  SIZE is the access size in bits, ELSIZE the
   element size from the type suffix, LOAD non-zero for VLDR.  */
static void
do_mve_vstr_vldr_RQ (int size, int elsize, int load)
{
  /* The optional shift amount is stored in the top bits of operand 1's
     imm; the Q register number in the low 5 bits.  */
  unsigned os = inst.operands[1].imm >> 5;
  unsigned type = inst.vectype.el[0].type;
  constraint (os != 0 && size == 8,
	      _("can not shift offsets when accessing less than half-word"));
  constraint (os && os != neon_logbits (size),
	      _("shift immediate must be 1, 2 or 3 for half-word, word"
		" or double-word accesses respectively"));
  if (inst.operands[1].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  /* The type suffix must be compatible with the access size.  */
  switch (size)
    {
    case 8:
      constraint (elsize >= 64, BAD_EL_TYPE);
      break;
    case 16:
      constraint (elsize < 16 || elsize >= 64, BAD_EL_TYPE);
      break;
    case 32:
    case 64:
      constraint (elsize != size, BAD_EL_TYPE);
      break;
    default:
      break;
    }
  constraint (inst.operands[1].writeback || !inst.operands[1].preind,
	      BAD_ADDR_MODE);
  if (load)
    {
      constraint (inst.operands[0].reg == (inst.operands[1].imm & 0x1f),
		  _("destination register and offset register may not be"
		    " the same"));
      /* A widening load needs a signedness; a same-size load must not
	 claim one.  */
      constraint (size == elsize && type == NT_signed, BAD_EL_TYPE);
      constraint (size != elsize && type != NT_unsigned && type != NT_signed,
		  BAD_EL_TYPE);
      /* Bit 28 distinguishes zero-extending (or same-size) loads from
	 sign-extending ones.  */
      inst.instruction |= ((size == elsize) || (type == NT_unsigned)) << 28;
    }
  else
    {
      constraint (type != NT_untyped, BAD_EL_TYPE);
    }

  inst.instruction |= 1 << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= neon_logbits (elsize) << 7;
  inst.instruction |= HI1 (inst.operands[1].imm) << 5;
  inst.instruction |= LOW4 (inst.operands[1].imm);
  inst.instruction |= !!os;
}
17391
/* Encode the MVE VSTR/VLDR [Rn, #imm]{!} / [Rn], #imm (scalar base +
   immediate offset) addressing forms.  SIZE is the access size in bits,
   ELSIZE the element size from the type suffix, LOAD non-zero for VLDR.  */
static void
do_mve_vstr_vldr_RI (int size, int elsize, int load)
{
  enum neon_el_type type = inst.vectype.el[0].type;

  constraint (size >= 64, BAD_ADDR_MODE);
  /* The type suffix must be compatible with the access size.  */
  switch (size)
    {
    case 16:
      constraint (elsize < 16 || elsize >= 64, BAD_EL_TYPE);
      break;
    case 32:
      constraint (elsize != size, BAD_EL_TYPE);
      break;
    default:
      break;
    }
  if (load)
    {
      /* Widening loads must be signed or unsigned.  */
      constraint (elsize != size && type != NT_unsigned
		  && type != NT_signed, BAD_EL_TYPE);
    }
  else
    {
      /* Narrowing stores must be untyped.  */
      constraint (elsize != size && type != NT_untyped, BAD_EL_TYPE);
    }

  /* Split the signed offset into an add/subtract flag and a magnitude.  */
  int imm = inst.relocs[0].exp.X_add_number;
  int add = 1;
  if (imm < 0)
    {
      add = 0;
      imm = -imm;
    }

  /* Offset must be a multiple of the access size and fit in 7 scaled
     bits.  */
  if ((imm % (size / 8) != 0) || imm > (0x7f << neon_logbits (size)))
    {
      switch (size)
	{
	case 8:
	  constraint (1, _("immediate must be in the range of +/-[0,127]"));
	  break;
	case 16:
	  constraint (1, _("immediate must be a multiple of 2 in the"
			   " range of +/-[0,254]"));
	  break;
	case 32:
	  constraint (1, _("immediate must be a multiple of 4 in the"
			   " range of +/-[0,508]"));
	  break;
	}
    }

  if (size != elsize)
    {
      /* Widening/narrowing form: restricted base and vector registers.  */
      constraint (inst.operands[1].reg > 7, BAD_HIREG);
      constraint (inst.operands[0].reg > 14,
		  _("MVE vector register in the range [Q0..Q7] expected"));
      inst.instruction |= (load && type == NT_unsigned) << 28;
      inst.instruction |= (size == 16) << 19;
      inst.instruction |= neon_logbits (elsize) << 7;
    }
  else
    {
      if (inst.operands[1].reg == REG_PC)
	as_tsktsk (MVE_BAD_PC);
      else if (inst.operands[1].reg == REG_SP && inst.operands[1].writeback)
	as_tsktsk (MVE_BAD_SP);
      inst.instruction |= 1 << 12;
      inst.instruction |= neon_logbits (size) << 7;
    }
  inst.instruction |= inst.operands[1].preind << 24;
  inst.instruction |= add << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* Clear the low 7 bits before inserting the scaled offset.  */
  inst.instruction &= 0xffffff80;
  inst.instruction |= imm >> neon_logbits (size);

}
17473
17474static void
17475do_mve_vstr_vldr (void)
17476{
17477 unsigned size;
17478 int load = 0;
17479
17480 if (inst.cond > COND_ALWAYS)
17481 inst.pred_insn_type = INSIDE_VPT_INSN;
17482 else
17483 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
17484
17485 switch (inst.instruction)
17486 {
17487 default:
17488 gas_assert (0);
17489 break;
17490 case M_MNEM_vldrb:
17491 load = 1;
17492 /* fall through. */
17493 case M_MNEM_vstrb:
17494 size = 8;
17495 break;
17496 case M_MNEM_vldrh:
17497 load = 1;
17498 /* fall through. */
17499 case M_MNEM_vstrh:
17500 size = 16;
17501 break;
17502 case M_MNEM_vldrw:
17503 load = 1;
17504 /* fall through. */
17505 case M_MNEM_vstrw:
17506 size = 32;
17507 break;
17508 case M_MNEM_vldrd:
17509 load = 1;
17510 /* fall through. */
17511 case M_MNEM_vstrd:
17512 size = 64;
17513 break;
17514 }
17515 unsigned elsize = inst.vectype.el[0].size;
17516
17517 if (inst.operands[1].isquad)
17518 {
17519 /* We are dealing with [Q, imm]{!} cases. */
17520 do_mve_vstr_vldr_QI (size, elsize, load);
17521 }
17522 else
17523 {
17524 if (inst.operands[1].immisreg == 2)
17525 {
17526 /* We are dealing with [R, Q, {UXTW #os}] cases. */
17527 do_mve_vstr_vldr_RQ (size, elsize, load);
17528 }
17529 else if (!inst.operands[1].immisreg)
17530 {
17531 /* We are dealing with [R, Imm]{!}/[R], Imm cases. */
17532 do_mve_vstr_vldr_RI (size, elsize, load);
17533 }
17534 else
17535 constraint (1, BAD_ADDR_MODE);
17536 }
17537
17538 inst.is_neon = 1;
17539}
17540
/* Encoder shared by the MVE interleaving vector store/load instructions
   (only [Rn]{!} addressing with a zero offset is accepted).  */
static void
do_mve_vst_vld (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  /* Only a plain register base, no immediate or register offset.  */
  constraint (!inst.operands[1].preind || inst.relocs[0].exp.X_add_symbol != 0
	      || inst.relocs[0].exp.X_add_number != 0
	      || inst.operands[1].immisreg != 0,
	      BAD_ADDR_MODE);
  constraint (inst.vectype.el[0].size > 32, BAD_EL_TYPE);
  if (inst.operands[1].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);
  else if (inst.operands[1].reg == REG_SP && inst.operands[1].writeback)
    as_tsktsk (MVE_BAD_SP);


  /* These instructions are one of the "exceptions" mentioned in
     handle_pred_state. They are MVE instructions that are not VPT compatible
     and do not accept a VPT code, thus appending such a code is a syntax
     error. */
  if (inst.cond > COND_ALWAYS)
    first_error (BAD_SYNTAX);
  /* If we append a scalar condition code we can set this to
     MVE_OUTSIDE_PRED_INSN as it will also lead to a syntax error. */
  else if (inst.cond < COND_ALWAYS)
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
  else
    inst.pred_insn_type = MVE_UNPREDICABLE_INSN;

  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= neon_logbits (inst.vectype.el[0].size) << 7;
  inst.is_neon = 1;
}
17578
/* Encode MVE VADDLV: two general-register results and one 32-bit
   signed/unsigned vector source (shape RRQ).  */
static void
do_mve_vaddlv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S32 | N_U32 | N_KEY);

  if (et.type == NT_invtype)
    first_error (BAD_EL_TYPE);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

  /* U bit selects the unsigned variant.  */
  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= inst.operands[1].reg << 19;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg;
  inst.is_neon = 1;
}
17602
/* Dyadic operations on signed/unsigned integer or float 32-bit elements
   (N_SUF_32).  Floating-point VMAX/VMIN additionally require Neon v1.  */
static void
do_neon_dyadic_if_su (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
					    N_SUF_32 | N_KEY);

  constraint ((inst.instruction == ((unsigned) N_MNEM_vmax)
	       || inst.instruction == ((unsigned) N_MNEM_vmin))
	      && et.type == NT_float
	      && !ARM_CPU_HAS_FEATURE (cpu_variant,fpu_neon_ext_v1), BAD_FPU);

  if (!check_simd_pred_availability (et.type == NT_float,
				     NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
17621
/* VADD/VSUB: try the VFP scalar forms first, then fall back to the
   Neon/MVE vector encodings.  */
static void
do_neon_addsub_if_i (void)
{
  if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
      && try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK,
					    N_EQK, N_IF_32 | N_I64 | N_KEY);

  constraint (rs == NS_QQR && et.size == 64, BAD_FPU);
  /* If we are parsing Q registers and the element types match MVE, which NEON
     also supports, then we must check whether this is an instruction that can
     be used by both MVE/NEON. This distinction can be made based on whether
     they are predicated or not. */
  if ((rs == NS_QQQ || rs == NS_QQR) && et.size != 64)
    {
      if (!check_simd_pred_availability (et.type == NT_float,
					 NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
    }
  else
    {
      /* If they are either in a D register or are using an unsupported. */
      if (rs != NS_QQR
	  && vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
    }

  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args. */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
17656
17657/* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
17658 result to be:
17659 V<op> A,B (A is operand 0, B is operand 2)
17660 to mean:
17661 V<op> A,B,A
17662 not:
17663 V<op> A,B,B
17664 so handle that case specially. */
17665
17666static void
17667neon_exchange_operands (void)
17668{
17669 if (inst.operands[1].present)
17670 {
17671 void *scratch = xmalloc (sizeof (inst.operands[0]));
17672
17673 /* Swap operands[1] and operands[2]. */
17674 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
17675 inst.operands[1] = inst.operands[2];
17676 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
17677 free (scratch);
17678 }
17679 else
17680 {
17681 inst.operands[1] = inst.operands[2];
17682 inst.operands[2] = inst.operands[0];
17683 }
17684}
17685
/* Shared handler for Neon compare instructions.  REGTYPES are the types
   accepted by the register-register form, IMMTYPES those accepted by the
   compare-against-zero immediate form.  INVERT swaps operands 1 and 2 so
   that inverted comparisons (e.g. less-than via greater-than) encode with
   the operands reversed.  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Immediate form: compare against #0.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Bit 10 distinguishes the float variant.  */
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
17713
/* Non-inverted compare (e.g. VCGE/VCGT family).  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
}
17719
/* Inverted compare: same types as do_neon_cmp but with operands swapped
   in the register form (see neon_exchange_operands).  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
}
17725
/* Equality compare: symmetric, so no operand swap; int/float 32-bit types.  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
17731
17732/* For multiply instructions, we have the possibility of 16-bit or 32-bit
17733 scalars, which are encoded in 5 bits, M : Rm.
17734 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
17735 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
17736 index in M.
17737
17738 Dot Product instructions are similar to multiply instructions except elsize
17739 should always be 32.
17740
17741 This function translates SCALAR, which is GAS's internal encoding of indexed
17742 scalar register, to raw encoding. There is also register and index range
17743 check based on ELSIZE. */
17744
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  /* Split GAS's internal indexed-scalar encoding into register number and
     element index, then pack them into the raw M:Rm form, validating the
     ranges allowed for the given element size.  */
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  /* Any other size, or an out-of-range register/index, is an error.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
17770
17771/* Encode multiply / multiply-accumulate scalar instructions. */
17772
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Translate the indexed scalar operand into its raw M:Rm encoding and
     assemble the destination/source register fields and type bits.  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  /* Bit 8 selects the float variant; bits 21:20 encode the size.  */
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
17795
/* Handler for multiply-accumulate style instructions that accept either a
   vector, an indexed scalar, or (MVE only) a GPR as the final operand.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form: Neon only, not MVE.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else if (!inst.operands[2].isvec)
    {
      /* Final operand is a GPR: this is the MVE QQR form.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);

      enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);

      neon_dyadic_misc (NT_unsigned, N_SU_MVE, 0);
    }
  else
    {
      /* Plain three-vector form: Neon only.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
17831
/* Encode the BFloat16 VFMAB/VFMAT instructions (vector and indexed-scalar
   forms).  Requires both Armv8 Neon and the BF16 extension.  */

static void
do_bfloat_vfma (void)
{
  constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
  constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
  enum neon_shape rs;
  int t_bit = 0;

  /* Anything that is not VFMAB is treated as VFMAT (T variant, t_bit set).  */
  if (inst.instruction != B_MNEM_vfmab)
    {
      t_bit = 1;
      inst.instruction = B_MNEM_vfmat;
    }

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form: the scalar operand packs the element index in
	 its low nibble and the register number above it.  */
      rs = neon_select_shape (NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);

      inst.instruction |= (1 << 25);
      int index = inst.operands[2].reg & 0xf;
      constraint (!(index < 4), _("index must be in the range 0 to 3"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 8),
		  _("indexed register must be less than 8"));
      neon_three_args (t_bit);
      /* Index bit 0 goes to bit 3, index bit 1 to bit 5.  */
      inst.instruction |= ((index & 1) << 3);
      inst.instruction |= ((index & 2) << 4);
    }
  else
    {
      /* Three-vector form.  */
      rs = neon_select_shape (NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
      neon_three_args (t_bit);
    }

}
17869
/* Handler for fused multiply-accumulate (VFMA/VFMS style): tries VFP first,
   then the MVE QQQ/QQR forms, then falls back to the Neon encoding.  */

static void
do_neon_fmac (void)
{
  if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_fma)
      && try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (!check_simd_pred_availability (TRUE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
    {
      enum neon_shape rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs, N_F_MVE | N_KEY, N_EQK,
						N_EQK);

      if (rs == NS_QQR)
	{
	  /* GPR operand: SP/PC are discouraged but not outright illegal,
	     so warn rather than error.  */

	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  /* Fixed base opcode for the MVE vector x GPR encoding; bit 28
	     selects the 16-bit element size.  */
	  inst.instruction = 0xee310e40;
	  inst.instruction |= (et.size == 16) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[1].reg) << 6;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	  return;
	}
    }
  else
    {
      /* Without MVE FP, the last operand must be a vector.  */
      constraint (!inst.operands[2].isvec, BAD_FPU);
    }

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
17912
17913static void
17914do_mve_vfma (void)
17915{
17916 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_bf16) &&
17917 inst.cond == COND_ALWAYS)
17918 {
17919 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
17920 inst.instruction = N_MNEM_vfma;
17921 inst.pred_insn_type = INSIDE_VPT_INSN;
17922 inst.cond = 0xf;
17923 return do_neon_fmac();
17924 }
17925 else
17926 {
17927 do_bfloat_vfma();
17928 }
17929}
17930
/* Encode VTST: three-same-register form, any 8/16/32-bit element type.  */

static void
do_neon_tst (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size);
}
17939
17940/* VMUL with 3 registers allows the P8 type. The scalar version supports the
17941 same types as the MAC equivalents. The polynomial type for this instruction
17942 is encoded the same as the integer type. */
17943
static void
do_neon_mul (void)
{
  /* Try the VFP scalar multiply first.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar multiply: Neon only; shares the MAC encoder.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      do_neon_mac_maybe_scalar ();
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  enum neon_shape rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
	  struct neon_type_el et
	    = neon_check_type (3, rs, N_EQK, N_EQK, N_I_MVE | N_F_MVE | N_KEY);
	  /* Float element types additionally require the MVE FP extension.  */
	  if (et.type == NT_float)
	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
			BAD_FPU);

	  neon_dyadic_misc (NT_float, N_I_MVE | N_F_MVE, 0);
	}
      else
	{
	  /* Neon three-vector form; the polynomial P8 type is encoded the
	     same as the integer type (see comment above).  */
	  constraint (!inst.operands[2].isvec, BAD_FPU);
	  neon_dyadic_misc (NT_poly,
			    N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
	}
    }
}
17979
/* Handler for saturating doubling multiply-high (VQDMULH family): scalar,
   Neon DDD/QQQ, and MVE QQR/QQQ forms.  */

static void
do_neon_qdmulh (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form: Neon only.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  /* MVE additionally allows S8 elements and the QQR form.  */
	  rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	}

      NEON_ENCODE (INTEGER, inst);
      if (rs == NS_QQR)
	mve_encode_qqr (et.size, 0, 0);
      else
	/* The U bit (rounding) comes from bit mask.  */
	neon_three_same (neon_quad (rs), 0, et.size);
    }
}
18020
/* Encode MVE VADDV (add across vector into a GPR).  */

static void
do_mve_vaddv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);

  if (et.type == NT_invtype)
    first_error (BAD_EL_TYPE);

  /* A condition suffix means we are inside a VPT block.  */
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

  mve_encode_rq (et.type == NT_unsigned, et.size);
}
18040
/* Encode MVE VHCADD (halving complex add with rotate).  */

static void
do_mve_vhcadd (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQI, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* Only rotations of 90 and 270 degrees exist for this instruction.  */
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));

  if (et.size == 32 && inst.operands[0].reg == inst.operands[2].reg)
    as_tsktsk (_("Warning: 32-bit element size and same first and third "
		 "operand makes instruction UNPREDICTABLE"));

  mve_encode_qqq (0, et.size);
  /* Bit 12 selects the 270-degree rotation.  */
  inst.instruction |= (rot == 270) << 12;
  inst.is_neon = 1;
}
18064
/* Encode MVE VQDMULL (saturating doubling multiply long), QQQ and QQR
   forms.  */

static void
do_mve_vqdmull (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);

  /* For 32-bit elements the destination overlapping a source is
     unpredictable; warn but carry on.  */
  if (et.size == 32
      && (inst.operands[0].reg == inst.operands[1].reg
	  || (rs == NS_QQQ && inst.operands[0].reg == inst.operands[2].reg)))
    as_tsktsk (BAD_MVE_SRCDEST);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (rs == NS_QQQ)
    {
      mve_encode_qqq (et.size == 32, 64);
      inst.instruction |= 1;
    }
  else
    {
      mve_encode_qqr (64, et.size == 32, 0);
      inst.instruction |= 0x3 << 5;
    }
}
18093
/* Encode MVE VADC (add with carry across vector); I32 elements only.  */

static void
do_mve_vadc (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_KEY | N_I32, N_EQK, N_EQK);

  if (et.type == NT_invtype)
    first_error (BAD_EL_TYPE);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqq (0, 64);
}
18111
/* Encode MVE VBRSR (bit-reverse shift right by GPR); QQR form only.  */

static void
do_mve_vbrsr (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqr (et.size, 0, 0);
}
18126
/* Encode MVE VSBC (subtract with carry across vector); I32 elements only.  */

static void
do_mve_vsbc (void)
{
  neon_check_type (3, NS_QQQ, N_EQK, N_EQK, N_I32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqq (1, 64);
}
18139
/* Encode MVE VMULH (multiply returning high half).  */

static void
do_mve_vmulh (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqq (et.type == NT_unsigned, et.size);
}
18154
/* Encode MVE VQDMLAH (saturating doubling multiply-accumulate with GPR);
   signed element types only.  */

static void
do_mve_vqdmlah (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S_32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
}
18169
/* Encode MVE VQDMLADH (saturating doubling multiply add dual, high half).  */

static void
do_mve_vqdmladh (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqq (0, et.size);
}
18184
18185
/* Handler for VMULL-style mnemonics.  On non-MVE targets an unconditional
   VMULLT may really be the Neon VMUL with a 't' condition suffix, so fall
   back to do_neon_mul in that case; otherwise encode the MVE VMULLT/B.  */

static void
do_mve_vmull (void)
{

  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_DDS,
					  NS_QQS, NS_QQQ, NS_QQR, NS_NULL);
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
      && inst.cond == COND_ALWAYS
      && ((unsigned)inst.instruction) == M_MNEM_vmullt)
    {
      if (rs == NS_QQQ)
	{
	  /* QQQ shape is ambiguous between Neon VMUL and MVE VMULLT;
	     decide based on the element type.  */

	  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
						    N_SUF_32 | N_F64 | N_P8
						    | N_P16 | N_I_MVE | N_KEY);
	  if (((et.type == NT_poly) && et.size == 8
	       && ARM_CPU_IS_ANY (cpu_variant))
	      || (et.type == NT_integer) || (et.type == NT_float))
	    goto neon_vmul;
	}
      else
	goto neon_vmul;
    }

  constraint (rs != NS_QQQ, BAD_FPU);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
					    N_SU_32 | N_P8 | N_P16 | N_KEY);

  /* We are dealing with MVE's vmullt.  */
  if (et.size == 32
      && (inst.operands[0].reg == inst.operands[1].reg
	  || inst.operands[0].reg == inst.operands[2].reg))
    as_tsktsk (BAD_MVE_SRCDEST);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (et.type == NT_poly)
    mve_encode_qqq (neon_logbits (et.size), 64);
  else
    mve_encode_qqq (et.type == NT_unsigned, et.size);

  return;

neon_vmul:
  /* Re-dispatch as Neon VMUL with a 't' condition code.  */
  inst.instruction = N_MNEM_vmul;
  inst.cond = 0xb;
  if (thumb_mode)
    inst.pred_insn_type = INSIDE_IT_INSN;
  do_neon_mul ();
}
18240
/* Encode MVE VABAV (absolute difference and accumulate across vector).  */

static void
do_mve_vabav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);

  if (rs == NS_NULL)
    return;

  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  /* NOTE(review): shape NS_NULL is passed here deliberately so only the
     element type is validated — confirm against neon_check_type.  */
  struct neon_type_el et = neon_check_type (2, NS_NULL, N_EQK, N_KEY | N_S8
					    | N_S16 | N_S32 | N_U8 | N_U16
					    | N_U32);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rqq (et.type == NT_unsigned, et.size);
}
18263
/* Encode the MVE VMLADAV/VMLSDAV family (multiply add/subtract dual
   accumulate across vector into a GPR).  */

static void
do_mve_vmladav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
					    N_EQK, N_EQK, N_SU_MVE | N_KEY);

  /* The exchange (x) and subtract variants only accept signed types.  */
  if (et.type == NT_unsigned
      && (inst.instruction == M_MNEM_vmladavx
	  || inst.instruction == M_MNEM_vmladavax
	  || inst.instruction == M_MNEM_vmlsdav
	  || inst.instruction == M_MNEM_vmlsdava
	  || inst.instruction == M_MNEM_vmlsdavx
	  || inst.instruction == M_MNEM_vmlsdavax))
    first_error (BAD_SIMD_TYPE);

  constraint (inst.operands[2].reg > 14,
	      _("MVE vector register in the range [Q0..Q7] expected"));

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The 8-bit element flag sits in a different bit position for the
     subtract variants.  */
  if (inst.instruction == M_MNEM_vmlsdav
      || inst.instruction == M_MNEM_vmlsdava
      || inst.instruction == M_MNEM_vmlsdavx
      || inst.instruction == M_MNEM_vmlsdavax)
    inst.instruction |= (et.size == 8) << 28;
  else
    inst.instruction |= (et.size == 8) << 8;

  mve_encode_rqq (et.type == NT_unsigned, 64);
  inst.instruction |= (et.size == 32) << 16;
}
18299
/* Encode the MVE VMLALDAV/VMLSLDAV family (long multiply accumulate across
   vector into a GPR pair).  */

static void
do_mve_vmlaldav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRQQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (4, rs, N_EQK, N_EQK, N_EQK,
		       N_S16 | N_S32 | N_U16 | N_U32 | N_KEY);

  /* The subtract variants only accept signed types.  */
  if (et.type == NT_unsigned
      && (inst.instruction == M_MNEM_vmlsldav
	  || inst.instruction == M_MNEM_vmlsldava
	  || inst.instruction == M_MNEM_vmlsldavx
	  || inst.instruction == M_MNEM_vmlsldavax))
    first_error (BAD_SIMD_TYPE);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rrqq (et.type == NT_unsigned, et.size);
}
18322
/* Encode the MVE VRMLALDAVH/VRMLSLDAVH family (rounding long multiply
   accumulate across vector, high half, into a GPR pair).  */

static void
do_mve_vrmlaldavh (void)
{
  struct neon_type_el et;
  if (inst.instruction == M_MNEM_vrmlsldavh
      || inst.instruction == M_MNEM_vrmlsldavha
      || inst.instruction == M_MNEM_vrmlsldavhx
      || inst.instruction == M_MNEM_vrmlsldavhax)
    {
      /* Subtract variants: signed 32-bit only; SP only gets a warning.  */
      et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK, N_S32 | N_KEY);
      if (inst.operands[1].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);
    }
  else
    {
      if (inst.instruction == M_MNEM_vrmlaldavhx
	  || inst.instruction == M_MNEM_vrmlaldavhax)
	et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK, N_S32 | N_KEY);
      else
	et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK,
			      N_U32 | N_S32 | N_KEY);
      /* vrmlaldavh's encoding with SP as the second, odd, GPR operand may alias
	 with vmax/min instructions, making the use of SP in assembly really
	 nonsensical, so instead of issuing a warning like we do for other uses
	 of SP for the odd register operand we error out.  */
      constraint (inst.operands[1].reg == REG_SP, BAD_SP);
    }

  /* Make sure we still check the second operand is an odd one and that PC is
     disallowed.  This because we are parsing for any GPR operand, to be able
     to distinguish between giving a warning or an error for SP as described
     above.  */
  constraint ((inst.operands[1].reg % 2) != 1, BAD_EVEN);
  constraint (inst.operands[1].reg == REG_PC, BAD_PC);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rrqq (et.type == NT_unsigned, 0);
}
18365
18366
/* Encode MVE VMAXNMV/VMINNMV (float max/min across vector into a GPR).  */

static void
do_mve_vmaxnmv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (2, rs, N_EQK, N_F_MVE | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* SP/PC as the scalar destination are discouraged; warn only.  */
  if (inst.operands[0].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[0].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  mve_encode_rq (et.size == 16, 64);
}
18386
/* Encode MVE VMAXV/VMINV and their absolute (VMAXAV/VMINAV) variants
   (integer max/min across vector into a GPR).  */

static void
do_mve_vmaxv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
  struct neon_type_el et;

  /* The absolute variants only accept signed element types.  */
  if (inst.instruction == M_MNEM_vmaxv || inst.instruction == M_MNEM_vminv)
    et = neon_check_type (2, rs, N_EQK, N_SU_MVE | N_KEY);
  else
    et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (inst.operands[0].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[0].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  mve_encode_rq (et.type == NT_unsigned, et.size);
}
18410
18411
/* Handler for VQRDMLAH/VQRDMLSH: Armv8.1 AdvSIMD on Neon targets (scalar
   and three-vector forms), or the MVE QQR form.  */

static void
do_neon_qrdmlah (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* Check we're on the correct architecture.  */
      if (!mark_feature_used (&fpu_neon_ext_armv8))
	inst.error
	  = _("instruction form not available on this architecture.");
      else if (!mark_feature_used (&fpu_neon_ext_v8_1))
	{
	  as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
	  record_feature_use (&fpu_neon_ext_v8_1);
	}
      if (inst.operands[2].isscalar)
	{
	  /* Indexed-scalar form.  */
	  enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
	  struct neon_type_el et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	  NEON_ENCODE (SCALAR, inst);
	  neon_mul_mac (et, neon_quad (rs));
	}
      else
	{
	  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  struct neon_type_el et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	  NEON_ENCODE (INTEGER, inst);
	  /* The U bit (rounding) comes from bit mask.  */
	  neon_three_same (neon_quad (rs), 0, et.size);
	}
    }
  else
    {
      /* MVE form: vector x GPR.  */
      enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
      struct neon_type_el et
	= neon_check_type (3, rs, N_EQK, N_EQK, N_S_32 | N_KEY);

      NEON_ENCODE (INTEGER, inst);
      mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
    }
}
18456
/* Encode absolute float compare (VACGE/VACGT family).  */

static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
					    N_F_16_32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
}
18466
/* Inverted absolute float compare: swap the source operands, then encode
   as the non-inverted form.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
18473
/* Encode Newton-Raphson step instructions (VRECPS/VRSQRTS style):
   float-only three-same-register form.  */

static void
do_neon_step (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
					    N_F_16_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
}
18482
/* Handler for VABS/VNEG: VFP form first, then the two-register vector
   encoding.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  if (!check_simd_pred_availability (et.type == NT_float,
				     NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  /* Bit 10 selects the float variant; bits 19:18 encode the size.  */
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
18509
/* Encode VSLI (shift left and insert).  MVE lacks the D-register and
   64-bit element variants.  */

static void
do_neon_sli (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
    }


  /* Left-shift insert allows shifts of 0 .. size-1.  */
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
18535
/* Encode VSRI (shift right and insert).  MVE lacks the D-register and
   64-bit element variants.  */

static void
do_neon_sri (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
    }

  /* Right-shift insert allows shifts of 1 .. size; the immediate is encoded
     as size - imm.  */
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}
18560
/* Encode VQSHLU (saturating shift left unsigned, immediate form).  */

static void
do_neon_qshlu_imm (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK | N_UNS,
			    N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
    }

  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
18591
/* Encode VQMOVN: saturating narrowing move, Q source to D destination.  */

static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  NEON_ENCODE (INTEGER, inst);
  /* Op field: 0xc0 selects the unsigned variant, 0x80 the signed one.  */
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  neon_two_same (0, 1, et.size / 2);
}
18606
/* Encode VQMOVUN: saturating narrowing move with unsigned result from
   signed operands, Q source to D destination.  */

static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results.  Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
18616
/* Encode VQSHRN/VQRSHRN: saturating (rounding) shift right and narrow.
   A zero shift is rewritten as the VQMOVN pseudo-instruction.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
18643
/* Encode VQSHRUN/VQRSHRUN: saturating shift right and narrow with unsigned
   result from signed operands.  A zero shift becomes VQMOVUN.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
18673
/* Encode VMOVN: narrowing move, Q source to D destination.  */

static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
18682
/* Encode VSHRN/VRSHRN: shift right and narrow.  A zero shift is rewritten
   as the VMOVN pseudo-instruction.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm> */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
18707
18708static void
18709do_neon_shll (void)
18710{
18711 /* FIXME: Type checking when lengthening. */
18712 struct neon_type_el et = neon_check_type (2, NS_QDI,
18713 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
18714 unsigned imm = inst.operands[2].imm;
18715
18716 if (imm == et.size)
18717 {
18718 /* Maximum shift variant. */
18719 NEON_ENCODE (INTEGER, inst);
18720 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
18721 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
18722 inst.instruction |= LOW4 (inst.operands[1].reg);
18723 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
18724 inst.instruction |= neon_logbits (et.size) << 18;
18725
18726 neon_dp_fixup (&inst);
18727 }
18728 else
18729 {
18730 /* A more-specific type check for non-max versions. */
18731 et = neon_check_type (2, NS_QDI,
18732 N_EQK | N_DBL, N_SU_32 | N_KEY);
18733 NEON_ENCODE (IMMED, inst);
18734 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
18735 }
18736}
18737
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

/* Each CVT_VAR row is
     CVT_VAR (name, dest-type, src-type, reg-class,
	      bitshift-mnemonic, plain-mnemonic, round-to-zero-mnemonic)
   where the three mnemonic columns are the classic VFP opcode names used by
   do_vfp_nsyn_cvt / do_vfp_nsyn_cvtz (NULL when no such encoding exists).
   The identifiers `whole_reg' and `key' are deliberately left unexpanded
   here: they resolve to locals of get_neon_cvt_flavour when the table is
   instantiated there.  */

#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */ \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  CVT_VAR (bf16_f32, N_BF16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
18777
/* Expand the table into one enumerator per conversion flavour.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  The trailing members add
   an "invalid" sentinel and mark the first VFP-only flavour, which
   do_neon_cvt_1 uses to route an instruction to the VFP encoders.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
18790
/* Work out which conversion flavour the current instruction's operand types
   select, for shape RS.  Each CVT_VAR expansion probes one table row with
   neon_check_type; the first row that matches clears inst.error and returns
   its flavour.  If no row matches, the error from the last probe is left in
   inst.error and neon_cvt_flavour_invalid is returned.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
18816
/* Rounding variants of the conversion instructions.  The letters mirror the
   mnemonic suffixes (VCVTA, VCVTN, VCVTP, VCVTM, VCVT round-to-zero, ...);
   neon_cvt_mode_x is the mode passed for VCVTR (see do_neon_cvtr), which
   rounds according to the current FP rounding mode.  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,
  neon_cvt_mode_n,
  neon_cvt_mode_p,
  neon_cvt_mode_m,
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
18827
/* Neon-syntax VFP conversions.  */

/* Encode a Neon-syntax conversion by dispatching to the classic VFP
   mnemonic for FLAVOUR (from the CVT_FLAVOUR_VAR table).  RS is the
   operand shape.  For the fixed-point (immediate bitshift) shapes the
   first two operands must be the same register; the remaining operands
   are shifted down before encoding.  Flavours whose table column is NULL
   encode nothing here.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  /* Drop the duplicated destination: operand 2 (the immediate)
	     becomes operand 1 for the VFP encoder.  */
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
18881
/* Encode the round-towards-zero variant of a Neon-syntax VFP conversion,
   using the round-to-zero mnemonic column of CVT_FLAVOUR_VAR.  Flavours
   with no round-to-zero encoding (NULL entry) are silently skipped.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
18898
18899static void
18900do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
18901 enum neon_cvt_mode mode)
18902{
18903 int sz, op;
18904 int rm;
18905
18906 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
18907 D register operands. */
18908 if (flavour == neon_cvt_flavour_s32_f64
18909 || flavour == neon_cvt_flavour_u32_f64)
18910 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
18911 _(BAD_FPU));
18912
18913 if (flavour == neon_cvt_flavour_s32_f16
18914 || flavour == neon_cvt_flavour_u32_f16)
18915 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
18916 _(BAD_FP16));
18917
18918 set_pred_insn_type (OUTSIDE_PRED_INSN);
18919
18920 switch (flavour)
18921 {
18922 case neon_cvt_flavour_s32_f64:
18923 sz = 1;
18924 op = 1;
18925 break;
18926 case neon_cvt_flavour_s32_f32:
18927 sz = 0;
18928 op = 1;
18929 break;
18930 case neon_cvt_flavour_s32_f16:
18931 sz = 0;
18932 op = 1;
18933 break;
18934 case neon_cvt_flavour_u32_f64:
18935 sz = 1;
18936 op = 0;
18937 break;
18938 case neon_cvt_flavour_u32_f32:
18939 sz = 0;
18940 op = 0;
18941 break;
18942 case neon_cvt_flavour_u32_f16:
18943 sz = 0;
18944 op = 0;
18945 break;
18946 default:
18947 first_error (_("invalid instruction shape"));
18948 return;
18949 }
18950
18951 switch (mode)
18952 {
18953 case neon_cvt_mode_a: rm = 0; break;
18954 case neon_cvt_mode_n: rm = 1; break;
18955 case neon_cvt_mode_p: rm = 2; break;
18956 case neon_cvt_mode_m: rm = 3; break;
18957 default: first_error (_("invalid rounding mode")); return;
18958 }
18959
18960 NEON_ENCODE (FPV8, inst);
18961 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
18962 encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
18963 inst.instruction |= sz << 8;
18964
18965 /* ARMv8.2 fp16 VCVT instruction. */
18966 if (flavour == neon_cvt_flavour_s32_f16
18967 ||flavour == neon_cvt_flavour_u32_f16)
18968 do_scalar_fp16_v82_encode ();
18969 inst.instruction |= op << 7;
18970 inst.instruction |= rm << 16;
18971 inst.instruction |= 0xf0000000;
18972 inst.is_neon = TRUE;
18973}
18974
/* Worker for the VCVT family.  Selects an operand shape, determines the
   conversion flavour from the operand types, then dispatches: classic VFP
   encodings (including PR11109 round-to-zero handling and the ARMv8.2 fp16
   scalar forms), Neon/MVE fixed-point (immediate) forms, Neon integer <->
   float forms, and the half-precision / bfloat16 widening and narrowing
   forms (NS_QD / NS_DQ).  MODE is the rounding variant requested by the
   mnemonic suffix.  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_QQI:
      if (mode == neon_cvt_mode_z
	  && (flavour == neon_cvt_flavour_f16_s16
	      || flavour == neon_cvt_flavour_f16_u16
	      || flavour == neon_cvt_flavour_s16_f16
	      || flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_f32_u32
	      || flavour == neon_cvt_flavour_f32_s32
	      || flavour == neon_cvt_flavour_s32_f32
	      || flavour == neon_cvt_flavour_u32_f32))
	{
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH))
	    return;
	}
      else if (mode == neon_cvt_mode_n)
	{
	  /* We are dealing with vcvt with the 'ne' condition.  */
	  inst.cond = 0x1;
	  inst.instruction = N_MNEM_vcvt;
	  do_neon_cvt_1 (neon_cvt_mode_z);
	  return;
	}
      /* fall through.  */
    case NS_DDI:
      {
	unsigned immbits;
	/* Indexed by flavour; pairs of rows for the signed/unsigned and
	   to-float/from-float combinations.  */
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if ((rs != NS_QQI || !ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	    && vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	  {
	    /* MVE fixed-point immediates must be 1..element-size.  */
	    constraint (inst.operands[2].present && inst.operands[2].imm == 0,
			_("immediate value out of range"));
	    switch (flavour)
	      {
		case neon_cvt_flavour_f16_s16:
		case neon_cvt_flavour_f16_u16:
		case neon_cvt_flavour_s16_f16:
		case neon_cvt_flavour_u16_f16:
		  constraint (inst.operands[2].imm > 16,
			      _("immediate value out of range"));
		  break;
		case neon_cvt_flavour_f32_u32:
		case neon_cvt_flavour_f32_s32:
		case neon_cvt_flavour_s32_f32:
		case neon_cvt_flavour_u32_f32:
		  constraint (inst.operands[2].imm > 32,
			      _("immediate value out of range"));
		  break;
		default:
		  inst.error = BAD_FPU;
		  return;
	      }
	  }

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* NOTE(review): bit 21 was already set just above, so this
	       repeated OR is redundant but harmless.  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_QQ:
      if ((mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	   || mode == neon_cvt_mode_m || mode == neon_cvt_mode_p)
	  && (flavour == neon_cvt_flavour_s16_f16
	      || flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s32_f32
	      || flavour == neon_cvt_flavour_u32_f32))
	{
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH8))
	    return;
	}
      else if (mode == neon_cvt_mode_z
	       && (flavour == neon_cvt_flavour_f16_s16
		   || flavour == neon_cvt_flavour_f16_u16
		   || flavour == neon_cvt_flavour_s16_f16
		   || flavour == neon_cvt_flavour_u16_f16
		   || flavour == neon_cvt_flavour_f32_u32
		   || flavour == neon_cvt_flavour_f32_s32
		   || flavour == neon_cvt_flavour_s32_f32
		   || flavour == neon_cvt_flavour_u32_f32))
	{
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH))
	    return;
	}
      /* fall through.  */
    case NS_DD:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* Directed-rounding (VCVTA/N/P/M) float-to-integer form.  */
	  NEON_ENCODE (FLOAT, inst);
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH8))
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
	  /* Also reached from the NS_DDI/NS_QQI case when the fixed-point
	     immediate is #0 (encoded as an integer conversion).  */
    int_encode:
	  {
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	      {
		if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
		  return;
	      }

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if (rs == NS_DQ)
	{
	  if (flavour == neon_cvt_flavour_bf16_f32)
	    {
	      if (vfp_or_neon_is_neon (NEON_CHECK_ARCH8) == FAIL)
		return;
	      constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
	      /* VCVT.bf16.f32.  */
	      inst.instruction = 0x11b60640;
	    }
	  else
	    /* VCVT.f16.f32.  */
	    inst.instruction = 0x3b60600;
	}
      else
	/* VCVT.f32.f16.  */
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
19260
/* VCVTR: convert using the current FP rounding mode.  */

static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
19266
/* VCVT: plain conversion; integer results round towards zero.  */

static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
19272
/* VCVTA: convert with 'a' rounding.  */

static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
19278
/* VCVTN: convert with 'n' rounding.  */

static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
19284
/* VCVTP: convert with 'p' rounding.  */

static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
19290
/* VCVTM: convert with 'm' rounding.  */

static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
19296
19297static void
19298do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
19299{
19300 if (is_double)
19301 mark_feature_used (&fpu_vfp_ext_armv8);
19302
19303 encode_arm_vfp_reg (inst.operands[0].reg,
19304 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
19305 encode_arm_vfp_reg (inst.operands[1].reg,
19306 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
19307 inst.instruction |= to ? 0x10000 : 0;
19308 inst.instruction |= t ? 0x80 : 0;
19309 inst.instruction |= is_double ? 0x100 : 0;
19310 do_vfp_cond_or_thumb ();
19311}
19312
/* Worker for VCVTB/VCVTT; T is TRUE for the top-half form (VCVTT).
   Handles the MVE Q-register forms (redirecting full-width conversions to
   do_neon_cvt_1), the VFP single <-> half forms, the double <-> half forms
   (ARMv8 VFP only), and the ARMv8.6 f32 -> bfloat16 form.  The cascade of
   neon_check_type probes relies on clearing inst.error after each failed
   probe before trying the next candidate typing.  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_QQ, NS_QQI, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (rs == NS_QQ || rs == NS_QQI)
    {
      int single_to_half = 0;
      if (!check_simd_pred_availability (TRUE, NEON_CHECK_ARCH))
	return;

      enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	  && (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16
	      || flavour == neon_cvt_flavour_f16_s16
	      || flavour == neon_cvt_flavour_f16_u16
	      || flavour == neon_cvt_flavour_u32_f32
	      || flavour == neon_cvt_flavour_s32_f32
	      || flavour == neon_cvt_flavour_f32_s32
	      || flavour == neon_cvt_flavour_f32_u32))
	{
	  /* Full-width MVE conversion: hand off to the plain VCVT
	     encoder as a round-to-zero conversion inside a VPT block.  */
	  inst.cond = 0xf;
	  inst.instruction = N_MNEM_vcvt;
	  set_pred_insn_type (INSIDE_VPT_INSN);
	  do_neon_cvt_1 (neon_cvt_mode_z);
	  return;
	}
      else if (rs == NS_QQ && flavour == neon_cvt_flavour_f32_f16)
	single_to_half = 1;
      else if (rs == NS_QQ && flavour != neon_cvt_flavour_f16_f32)
	{
	  first_error (BAD_FPU);
	  return;
	}

      inst.instruction = 0xee3f0e01;
      inst.instruction |= single_to_half << 28;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 13;
      inst.instruction |= t << 12;
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 1;
      inst.is_neon = 1;
    }
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_BF16 | N_VFP, N_F32).type != NT_invtype)
    {
      /* BFloat16 VCVT{T,B}.bf16.f32 (ARMv8.6 BF16 extension).  */
      constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
      inst.error = NULL;
      inst.instruction |= (1 << 8);
      inst.instruction &= ~(1 << 9);
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else
    return;
}
19403
/* VCVTB: convert using the bottom half of the half-precision register.  */

static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
19409
19410
/* VCVTT: convert using the top half of the half-precision register.  */

static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
19416
/* Encode an immediate-operand VMOV/VMVN.  Searches for a cmode/op encoding
   of the (possibly 64-bit) immediate; if the value cannot be encoded
   directly, retries with the bit-inverted value under the opposite
   mnemonic (VMOV <-> VMVN).  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  /* regisimm means a 64-bit immediate: the high 32 bits were parked in
     the reg field.  */
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV.  Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the op bit: the search above may have flipped it.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
19468
/* Encode VMVN: bitwise NOT of a register, or a move of an inverted
   immediate (via neon_move_immediate).  MVE restricts the register form to
   Q registers and rejects cmode 0xd immediates.  */

static void
do_neon_mvn (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[1].isreg)
    {
      enum neon_shape rs;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	rs = neon_select_shape (NS_QQ, NS_NULL);
      else
	rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      NEON_ENCODE (IMMED, inst);
      neon_move_immediate ();
    }

  neon_dp_fixup (&inst);

  /* The MVE constraints are checked after encoding so that the cmode field
     of the final instruction word can be inspected.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      constraint (!inst.operands[1].isreg && !inst.operands[0].isquad, BAD_FPU);
      constraint ((inst.instruction & 0xd00) == 0xd00,
		  _("immediate value out of range"));
    }
}
19505
/* Encode instructions of form:

  |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
  |  U  |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm |

   i.e. the Neon mixed-length (long/wide/narrow) three-register layout.
   ET supplies the U (unsigned) bit; SIZE the two size bits.  */

static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
19525
/* Encode VADDL/VSUBL/VABDL.  The true long (NS_QDD) form is plain Neon.
   For MVE, the mnemonics only exist as VADD/VSUB/VABD with an 'le'/'lt'
   condition suffix mis-parsed as the 'l' of the long form plus an 'e'/'t'
   IT-block qualifier, so they are rewritten and re-dispatched.  */

static void
do_neon_dyadic_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_QDD, NS_QQQ, NS_QQR, NS_NULL);
  if (rs == NS_QDD)
    {
      if (vfp_or_neon_is_neon (NEON_CHECK_ARCH | NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (INTEGER, inst);
      /* FIXME: Type checking for lengthening op.  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
      neon_mixed_length (et, et.size);
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (inst.cond == 0xf || inst.cond == 0x10))
    {
      /* If parsing for MVE, vaddl/vsubl/vabdl{e,t} can only be vadd/vsub/vabd
	 in an IT block with le/lt conditions.  */

      if (inst.cond == 0xf)
	inst.cond = 0xb;
      else if (inst.cond == 0x10)
	inst.cond = 0xd;

      inst.pred_insn_type = INSIDE_IT_INSN;

      if (inst.instruction == N_MNEM_vaddl)
	{
	  inst.instruction = N_MNEM_vadd;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vsubl)
	{
	  inst.instruction = N_MNEM_vsub;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vabdl)
	{
	  inst.instruction = N_MNEM_vabd;
	  do_neon_dyadic_if_su ();
	}
    }
  else
    first_error (BAD_FPU);
}
19573
/* Encode VABAL: absolute difference and accumulate, long form.  */

static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
19581
19582static void
19583neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
19584{
19585 if (inst.operands[2].isscalar)
19586 {
19587 struct neon_type_el et = neon_check_type (3, NS_QDS,
19588 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
19589 NEON_ENCODE (SCALAR, inst);
19590 neon_mul_mac (et, et.type == NT_unsigned);
19591 }
19592 else
19593 {
19594 struct neon_type_el et = neon_check_type (3, NS_QDD,
19595 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
19596 NEON_ENCODE (INTEGER, inst);
19597 neon_mixed_length (et, et.size);
19598 }
19599}
19600
/* Encode VMLAL/VMLSL/VMULL-style long MAC, register or scalar form.  */

static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
19606
/* Like neon_scalar_for_mul, generate the Rm field encoding from GAS's
   internal SCALAR representation.  QUAD_P is 1 for the Q-register form,
   0 for the D-register form; the two forms have different register/index
   limits and bit layouts.  Reports an error and returns 0 when the scalar
   is out of range.  */

static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned reg = NEON_SCALAR_REG (scalar);
  unsigned idx = NEON_SCALAR_INDEX (scalar);
  int ok;

  if (quad_p)
    ok = (reg <= 7 && idx <= 3);
  else
    ok = (reg <= 15 && idx <= 1);

  if (!ok)
    {
      first_error (_("scalar out of range for multiply instruction"));
      return 0;
    }

  if (quad_p)
    return ((reg & 0x7)
	    | ((idx & 0x1) << 3)
	    | (((idx >> 1) & 0x1) << 5));

  return (((reg & 0x1) << 5)
	  | ((reg >> 1) & 0x7)
	  | ((idx & 0x1) << 3));
}
19639
/* Encode VFMAL (SUBTYPE == 0) or VFMSL (SUBTYPE == 1): FP16 multiply with
   FP32 accumulate, in three-same D/Q register form or with a scalar-indexed
   third operand.  Requires the FP16 FML extension and ARMv8 Neon.  */

static void
do_neon_fmac_maybe_scalar_long (int subtype)
{
  enum neon_shape rs;
  int high8;
  /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding.  The
     'size' field (bits[21:20]) has a different meaning.  For the scalar index
     variant, it's used to differentiate add and subtract, otherwise it has
     the fixed value 0x2.  */
  int size = -1;

  /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
     be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      high8 = 0xfe000000;
      if (subtype)
	size = 16;
      rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      size = 32;
      if (subtype)
	inst.instruction |= (0x1 << 23);
      rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
    }


  if (inst.cond != COND_ALWAYS)
    as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
	       "behaviour is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
	      _(BAD_FP16));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* "opcode" from template has included "ubit", so simply pass 0 here.  Also,
     the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
     so we simply pass -1 as size.  */
  unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
  neon_three_same (quad_p, 0, size);

  /* Undo neon_dp_fixup.  Redo the high eight bits.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;

#define LOW1(R) ((R) & 0x1)
#define HI4(R) (((R) >> 1) & 0xf)
  /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
     whether the instruction is in Q form and whether Vm is a scalar indexed
     operand.  */
  if (inst.operands[2].isscalar)
    {
      unsigned rm
	= neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
      inst.instruction &= 0xffffffd0;
      inst.instruction |= rm;

      if (!quad_p)
	{
	  /* Redo Rn as well.  */
	  inst.instruction &= 0xfff0ff7f;
	  inst.instruction |= HI4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
	}
    }
  else if (!quad_p)
    {
      /* Redo Rn and Rm.  */
      inst.instruction &= 0xfff0ff50;
      inst.instruction |= HI4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI4 (inst.operands[2].reg);
      inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
    }
}
19720
/* VFMAL: FP16 fused multiply-accumulate long, add variant.  */
static void
do_neon_vfmal (void)
{
  do_neon_fmac_maybe_scalar_long (0);
}
19726
/* VFMSL: FP16 fused multiply-accumulate long, subtract variant.  */
static void
do_neon_vfmsl (void)
{
  do_neon_fmac_maybe_scalar_long (1);
}
19732
/* Encode a Neon "wide" dyadic operation: shape Q, Q, D, where the D operand
   is widened to match (N_DBL on the first two operands).  Element type is
   any signed/unsigned 8/16/32-bit integer.  */
static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
19740
/* Encode a Neon "narrow" dyadic operation: shape Q, D, D with a doubled
   first operand; result elements are half the input width (et.size / 2).  */
static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
19751
/* Encode a saturating long multiply-(accumulate) by scalar; only signed
   16/32-bit element types are accepted, for both the full-vector and the
   scalar operand.  */
static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
19757
/* Encode VMULL, which may be either the scalar-indexed long multiply or the
   three-register long multiply, including the polynomial variants.  */
static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    /* Scalar-indexed variant shares its encoder with VMLAL/VMLSL.  */
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Size 64 is encoded in the 32-bit size field slot (0b10).  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
19789
/* Encode VEXT: extract a byte window from the concatenation of two vectors.
   The immediate operand is given in elements and converted to bytes here.  */
static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Convert the element index into a byte offset.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  /* Byte offset must stay inside the register (8 bytes D, 16 bytes Q).  */
  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
19811
/* Encode VREV16/VREV32/VREV64: reverse elements within regions of a vector.
   For MVE only the Q-register form is available.  */
static void
do_neon_rev (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    rs = neon_select_shape (NS_QQ, NS_NULL);
  else
    rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);

  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask.  We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext) && elsize == 64
      && inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("Warning: 64-bit element size and same destination and source"
		 " operands makes instruction UNPREDICTABLE"));

  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
19843
/* Encode VDUP: duplicate either a vector scalar (lane) or an ARM core
   register into every lane of a D or Q vector.  */
static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* VDUP (scalar): source is a lane of a D register.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1),
		  BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Lane index is shifted up to sit above the size bits in imm4.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* VDUP (ARM core register).  */
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      if (rs == NS_QR)
	{
	  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH))
	    return;
	}
      else
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1),
		    BAD_FPU);

      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  /* SP/PC sources are UNPREDICTABLE for MVE; warn, don't reject.  */
	  if (inst.operands[1].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[1].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);
	}

      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
19913
/* Encode the MVE VMOV between two GPRs and two vector lanes.  TOQ is nonzero
   when moving from the GPRs into the vector (operands are then Q-lanes
   first); zero when extracting vector lanes into the GPRs.  */
static void
do_mve_mov (int toQ)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = MVE_UNPREDICABLE_INSN;

  /* Operand positions differ between the two directions; default is the
     GPR-destination order, remapped below for the Q-destination form.  */
  unsigned Rt = 0, Rt2 = 1, Q0 = 2, Q1 = 3;
  if (toQ)
    {
      Q0 = 0;
      Q1 = 1;
      Rt = 2;
      Rt2 = 3;
    }

  /* The two lane indices must refer to the same Q register with the first
     index exactly two greater than the second.  */
  constraint (inst.operands[Q0].reg != inst.operands[Q1].reg + 2,
	      _("Index one must be [2,3] and index two must be two less than"
		" index one."));
  constraint (inst.operands[Rt].reg == inst.operands[Rt2].reg,
	      _("General purpose registers may not be the same"));
  constraint (inst.operands[Rt].reg == REG_SP
	      || inst.operands[Rt2].reg == REG_SP,
	      BAD_SP);
  constraint (inst.operands[Rt].reg == REG_PC
	      || inst.operands[Rt2].reg == REG_PC,
	      BAD_PC);

  inst.instruction = 0xec000f00;
  /* NOTE(review): Q-lane operands appear to encode reg/32 as the Q register
     number and reg%4 as the lane pair — derived from the divisions below.  */
  inst.instruction |= HI1 (inst.operands[Q1].reg / 32) << 23;
  inst.instruction |= !!toQ << 20;
  inst.instruction |= inst.operands[Rt2].reg << 16;
  inst.instruction |= LOW4 (inst.operands[Q1].reg / 32) << 13;
  inst.instruction |= (inst.operands[Q1].reg % 4) << 4;
  inst.instruction |= inst.operands[Rt].reg;
}
19951
19952static void
19953do_mve_movn (void)
19954{
19955 if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
19956 return;
19957
19958 if (inst.cond > COND_ALWAYS)
19959 inst.pred_insn_type = INSIDE_VPT_INSN;
19960 else
19961 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
19962
19963 struct neon_type_el et = neon_check_type (2, NS_QQ, N_EQK, N_I16 | N_I32
19964 | N_KEY);
19965
19966 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
19967 inst.instruction |= (neon_logbits (et.size) - 1) << 18;
19968 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
19969 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
19970 inst.instruction |= LOW4 (inst.operands[1].reg);
19971 inst.is_neon = 1;
19972
19973}
19974
19975/* VMOV has particularly many variations. It can be one of:
19976 0. VMOV<c><q> <Qd>, <Qm>
19977 1. VMOV<c><q> <Dd>, <Dm>
19978 (Register operations, which are VORR with Rm = Rn.)
19979 2. VMOV<c><q>.<dt> <Qd>, #<imm>
19980 3. VMOV<c><q>.<dt> <Dd>, #<imm>
19981 (Immediate loads.)
19982 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
19983 (ARM register to scalar.)
19984 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
19985 (Two ARM registers to vector.)
19986 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
19987 (Scalar to ARM register.)
19988 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
19989 (Vector to two ARM registers.)
19990 8. VMOV.F32 <Sd>, <Sm>
19991 9. VMOV.F64 <Dd>, <Dm>
19992 (VFP register moves.)
19993 10. VMOV.F32 <Sd>, #imm
19994 11. VMOV.F64 <Dd>, #imm
19995 (VFP float immediate load.)
19996 12. VMOV <Rd>, <Sm>
19997 (VFP single to ARM reg.)
19998 13. VMOV <Sd>, <Rm>
19999 (ARM reg to VFP single.)
20000 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
20001 (Two ARM regs to two VFP singles.)
20002 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
20003 (Two VFP singles to two ARM regs.)
20004 16. VMOV<c> <Rt>, <Rt2>, <Qd[idx]>, <Qd[idx2]>
20005 17. VMOV<c> <Qd[idx]>, <Qd[idx2]>, <Rt>, <Rt2>
20006 18. VMOV<c>.<dt> <Rt>, <Qn[idx]>
20007 19. VMOV<c>.<dt> <Qd[idx]>, <Rt>
20008
20009 These cases can be disambiguated using neon_select_shape, except cases 1/9
20010 and 3/11 which depend on the operand type too.
20011
20012 All the encoded bits are hardcoded by this function.
20013
20014 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
20015 Cases 5, 7 may be used with VFPv2 and above.
20016
20017 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
20018 can specify a type where it doesn't make sense to, and is ignored). */
20019
/* Main VMOV encoder; dispatches on the operand shape — see the variation
   list in the comment above for the case numbering used below.  */
static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRSS, NS_SSRR, NS_RRFF, NS_FFRR,
					  NS_DRR, NS_RRD, NS_QQ, NS_DD, NS_QI,
					  NS_DI, NS_SR, NS_RS, NS_FF, NS_FI,
					  NS_RF, NS_FR, NS_HR, NS_RH, NS_HI,
					  NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;

      /* In MVE we interpret the following instructions as same, so ignoring
	 the following type (float) and size (64) checks.
	 a: VMOV<c><q> <Dd>, <Dm>
	 b: VMOV<c><q>.F64 <Dd>, <Dm>.  */
      if ((et.type == NT_float && et.size == 64)
	  || (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)))
	{
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (!check_simd_pred_availability (FALSE,
					  NEON_CHECK_CC | NEON_CHECK_ARCH))
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* Encoded as VORR Dd, Dm, Dm: source appears in both Vn and Vm.  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (!check_simd_pred_availability (FALSE,
					NEON_CHECK_CC | NEON_CHECK_ARCH))
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	if (et.size != 32)
	  {
	    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		&& vfp_or_neon_is_neon (NEON_CHECK_ARCH) == FAIL)
	      return;
	  }
	else
	  {
	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
			&& !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
			_(BAD_FPU));
	  }

	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	  {
	    /* SP/PC sources are merely UNPREDICTABLE for MVE; warn only.  */
	    if (inst.operands[1].reg == REG_SP)
	      as_tsktsk (MVE_BAD_SP);
	    else if (inst.operands[1].reg == REG_PC)
	      as_tsktsk (MVE_BAD_PC);
	  }
	/* Scalar in a D register indexes 64 bits, in a Q register 128.  */
	unsigned size = inst.operands[0].isscalar == 1 ? 64 : 128;

	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= size / et.size, _("scalar index out of range"));


	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	/* Fold the low lane-index bits into the opc/lane field.  */
	bcdebits |= (x & ((1 << (3-logsize)) - 1)) << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= ((bcdebits >> 2) & 3) << 21;
	inst.instruction |= (x >> (3-logsize)) << 16;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	if (et.size != 32)
	  {
	    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		&& vfp_or_neon_is_neon (NEON_CHECK_CC
					| NEON_CHECK_ARCH) == FAIL)
	      return;
	  }
	else
	  {
	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
			&& !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
			_(BAD_FPU));
	  }

	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	  {
	    if (inst.operands[0].reg == REG_SP)
	      as_tsktsk (MVE_BAD_SP);
	    else if (inst.operands[0].reg == REG_PC)
	      as_tsktsk (MVE_BAD_PC);
	  }

	/* Scalar in a D register indexes 64 bits, in a Q register 128.  */
	unsigned size = inst.operands[1].isscalar == 1 ? 64 : 128;

	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= size / et.size, _("scalar index out of range"));

	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= (x & ((1 << (3-logsize)) - 1)) << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
	inst.instruction |= (x >> (3-logsize)) << 16;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
      encode_fconstd:
      if (!inst.operands[1].immisfloat)
	{
	  unsigned new_imm;
	  /* Immediate has to fit in 8 bits so float is enough.  */
	  float imm = (float) inst.operands[1].imm;
	  memcpy (&new_imm, &imm, sizeof (float));
	  /* But the assembly may have been written to provide an integer
	     bit pattern that equates to a float, so check that the
	     conversion has worked.  */
	  if (is_quarter_float (new_imm))
	    {
	      if (is_quarter_float (inst.operands[1].imm))
		as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));

	      inst.operands[1].imm = new_imm;
	      inst.operands[1].immisfloat = 1;
	    }
	}

      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_RRSS:
      do_mve_mov (0);
      break;
    case NS_SSRR:
      do_mve_mov (1);
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
20352
/* Encode the MVE VMOVL (vector move long, widening 8/16-bit elements).
   Falls back to the generic VMOV encoder when the operand shape is not the
   two-Q-register form this instruction requires.  */
static void
do_mve_movl (void)
{
  if (!(inst.operands[0].present && inst.operands[0].isquad
      && inst.operands[1].present && inst.operands[1].isquad
      && !inst.operands[2].present))
    {
      /* Not the Q,Q shape: re-encode as a plain VMOV instead.  */
      inst.instruction = 0;
      inst.cond = 0xb;
      if (thumb_mode)
	set_pred_insn_type (INSIDE_IT_INSN);
      do_neon_mov ();
      return;
    }

  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  if (inst.cond != COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;

  struct neon_type_el et = neon_check_type (2, NS_QQ, N_EQK, N_S8 | N_U8
					    | N_S16 | N_U16 | N_KEY);

  /* U bit, destination, size, then source.  */
  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= (neon_logbits (et.size) + 1) << 19;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.is_neon = 1;
}
20385
/* Encode V{R}SHR-style right shift by immediate.  A shift of zero is
   assembled as a plain VMOV.  */
static void
do_neon_rshift_round_imm (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* MVE only has the Q-register form.  */
      rs = neon_select_shape (NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_SU_MVE | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
    }
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  /* Shift amount is encoded as et.size - imm in the immediate field.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}
20420
20421static void
20422do_neon_movhf (void)
20423{
20424 enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
20425 constraint (rs != NS_HH, _("invalid suffix"));
20426
20427 if (inst.cond != COND_ALWAYS)
20428 {
20429 if (thumb_mode)
20430 {
20431 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
20432 " the behaviour is UNPREDICTABLE"));
20433 }
20434 else
20435 {
20436 inst.error = BAD_COND;
20437 return;
20438 }
20439 }
20440
20441 do_vfp_sp_monadic ();
20442
20443 inst.is_neon = 1;
20444 inst.instruction |= 0xf0000000;
20445}
20446
/* Encode VMOVL (Neon): widen each element of a D register into a Q register;
   the element size goes in bits [21:19].  */
static void
do_neon_movl (void)
{
  struct neon_type_el et = neon_check_type (2, NS_QD,
    N_EQK | N_DBL, N_SU_32 | N_KEY);
  unsigned sizebits = et.size >> 3;
  inst.instruction |= sizebits << 19;
  neon_two_same (0, et.type == NT_unsigned, -1);
}
20456
20457static void
20458do_neon_trn (void)
20459{
20460 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20461 struct neon_type_el et = neon_check_type (2, rs,
20462 N_EQK, N_8 | N_16 | N_32 | N_KEY);
20463 NEON_ENCODE (INTEGER, inst);
20464 neon_two_same (neon_quad (rs), 1, et.size);
20465}
20466
/* Encode VZIP/VUZP.  The 32-bit D-register forms do not exist as such and
   are encoded as the equivalent VTRN.32.  */
static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
20482
/* Encode VQABS/VQNEG: saturating absolute value / negate, signed element
   types only.  MVE restricts the shape to Q registers.  */
static void
do_neon_sat_abs_neg (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  enum neon_shape rs;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    rs = neon_select_shape (NS_QQ, NS_NULL);
  else
    rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
20498
/* Encode pairwise-long operations (VPADDL/VPADAL), any signed/unsigned
   8/16/32-bit element type.  */
static void
do_neon_pair_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (neon_quad (rs), 1, et.size);
}
20508
/* Encode reciprocal / reciprocal-sqrt estimate instructions; bit 8
   distinguishes the floating-point from the unsigned-integer variant.  */
static void
do_neon_recip_est (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
  inst.instruction |= (et.type == NT_float) << 8;
  neon_two_same (neon_quad (rs), 1, et.size);
}
20518
/* Encode VCLS (count leading sign bits); signed element types only.  MVE
   restricts the shape to Q registers.  */
static void
do_neon_cls (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    rs = neon_select_shape (NS_QQ, NS_NULL);
  else
    rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
20535
/* Encode VCLZ (count leading zeros); sign-agnostic integer element types.
   MVE restricts the shape to Q registers.  */
static void
do_neon_clz (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    rs = neon_select_shape (NS_QQ, NS_NULL);
  else
    rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
20552
20553static void
20554do_neon_cnt (void)
20555{
20556 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20557 struct neon_type_el et = neon_check_type (2, rs,
20558 N_EQK | N_INT, N_8 | N_KEY);
20559 neon_two_same (neon_quad (rs), 1, et.size);
20560}
20561
20562static void
20563do_neon_swp (void)
20564{
20565 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20566 neon_two_same (neon_quad (rs), 1, -1);
20567}
20568
/* Encode VTBL/VTBX: table lookup through a list of 1-4 D registers; the
   list length minus one goes in bits [9:8].  */
static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
20592
/* Encode VLDM/VSTM of D registers; single-precision lists are delegated to
   the VFP encoder.  */
static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register occupies two words in the offset field.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
20622
/* Encode VLDR/VSTR of an S or D register by delegating to the matching VFP
   nsyn opcode; also handles the ARMv8.2 fp16 forms.  */
static void
do_neon_ldr_str (void)
{
  /* Bit 20 is the load/store direction in the template opcode.  */
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
	inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
	as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");

      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
      if (inst.vectype.el[0].size == 16)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
20659
/* Encode the Thumb VLDR/VSTR (System Register) form: transfer between a
   system register (operand 0, given as an immediate selector) and memory.  */
static void
do_t_vldr_vstr_sysreg (void)
{
  int fp_vldr_bitno = 20, sysreg_vldr_bitno = 20;
  /* The load/store direction is read from the template's FP opcode bit and
     re-applied to the system-register encoding.  */
  bfd_boolean is_vldr = ((inst.instruction & (1 << fp_vldr_bitno)) != 0);

  /* Use of PC is UNPREDICTABLE.  */
  if (inst.operands[1].reg == REG_PC)
    inst.error = _("Use of PC here is UNPREDICTABLE");

  if (inst.operands[1].immisreg)
    inst.error = _("instruction does not accept register index");

  if (!inst.operands[1].isreg)
    inst.error = _("instruction does not accept PC-relative addressing");

  /* Offset immediate is 7 bits wide (before the implied scaling applied by
     encode_arm_cp_address).  */
  if (abs (inst.operands[1].imm) >= (1 << 7))
    inst.error = _("immediate value out of range");

  inst.instruction = 0xec000f80;
  if (is_vldr)
    inst.instruction |= 1 << sysreg_vldr_bitno;
  encode_arm_cp_address (1, TRUE, FALSE, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM);
  /* System register selector is split across bits [15:13] and bit 22.  */
  inst.instruction |= (inst.operands[0].imm & 0x7) << 13;
  inst.instruction |= (inst.operands[0].imm & 0x8) << 19;
}
20686
20687static void
20688do_vldr_vstr (void)
20689{
20690 bfd_boolean sysreg_op = !inst.operands[0].isreg;
20691
20692 /* VLDR/VSTR (System Register). */
20693 if (sysreg_op)
20694 {
20695 if (!mark_feature_used (&arm_ext_v8_1m_main))
20696 as_bad (_("Instruction not permitted on this architecture"));
20697
20698 do_t_vldr_vstr_sysreg ();
20699 }
20700 /* VLDR/VSTR. */
20701 else
20702 {
20703 if (!mark_feature_used (&fpu_vfp_ext_v1xd))
20704 as_bad (_("Instruction not permitted on this architecture"));
20705 do_neon_ldr_str ();
20706 }
20707}
20708
20709/* "interleave" version also handles non-interleaving register VLD1/VST1
20710 instructions. */
20711
/* Encode the interleaving VLD<n>/VST<n> (multiple n-element structures)
   forms; also handles non-interleaving register VLD1/VST1.  Operand 0 is
   the register list, operand 1 the memory operand (whose imm carries the
   alignment in its high bits).  */
static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
      -1, -1, 0x8, 0x9, -1, -1, 0x3, -1,  /* VLD2 / VST2.  */
      -1, -1, -1, -1, 0x4, 0x5, -1, -1,   /* VLD3 / VST3.  */
      -1, -1, -1, -1, -1, -1, 0x0, 0x1    /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Alignment (in bits) is stored in the top byte of operand 1's imm.
     Valid alignments depend on the register-list length.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      BAD_EL_TYPE);

  /* Replace the placeholder "type" field (bits [11:8]) with the looked-up
     encoding.  */
  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
20777
20778/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
20779 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
20780 otherwise. The variable arguments are a list of pairs of legal (size, align)
20781 values, terminated with -1. */
20782
20783static int
20784neon_alignment_bit (int size, int align, int *do_alignment, ...)
20785{
20786 va_list ap;
20787 int result = FAIL, thissize, thisalign;
20788
20789 if (!inst.operands[1].immisalign)
20790 {
20791 *do_alignment = 0;
20792 return SUCCESS;
20793 }
20794
20795 va_start (ap, do_alignment);
20796
20797 do
20798 {
20799 thissize = va_arg (ap, int);
20800 if (thissize == -1)
20801 break;
20802 thisalign = va_arg (ap, int);
20803
20804 if (size == thissize && align == thisalign)
20805 result = SUCCESS;
20806 }
20807 while (result != SUCCESS);
20808
20809 va_end (ap);
20810
20811 if (result == SUCCESS)
20812 *do_alignment = 1;
20813 else
20814 first_error (_("unsupported alignment for instruction"));
20815
20816 return result;
20817}
20818
/* Encode single n-element structure to/from one lane: VLD<n>/VST<n>
   (single structure to one lane).  N comes from bits [9:8] of the
   opcode placeholder; valid alignments depend on both N and the
   element size.  */
static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  /* Alignment (in bits) lives in the top byte of the memory operand's imm.  */
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;
  /* Highest valid lane index for this element size.  */
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Each <n> has its own set of legal (size, alignment) pairs and its own
     way of packing the alignment into bits [5:4].  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
		      16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  /* The lane index sits above the alignment/stride bits; its position also
     depends on the element size.  */
  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
20903
20904/* Encode single n-element structure to all lanes VLD<n> instructions. */
20905
/* Encode single n-element structure to all lanes VLD<n> instructions.
   N comes from bits [9:8] of the opcode placeholder; the list length
   must match N (except VLD1, which accepts 1 or 2 registers).  */
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* One or two registers allowed; bit 5 set for the two-register form.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      /* Bit 5 encodes a register stride of two.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment get a special size code.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* The "a" (alignment present) bit.  */
  inst.instruction |= do_alignment << 4;
}
20978
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
20981
20982static void
20983do_neon_ldx_stx (void)
20984{
20985 if (inst.operands[1].isreg)
20986 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
20987
20988 switch (NEON_LANE (inst.operands[0].imm))
20989 {
20990 case NEON_INTERLEAVE_LANES:
20991 NEON_ENCODE (INTERLV, inst);
20992 do_neon_ld_st_interleave ();
20993 break;
20994
20995 case NEON_ALL_LANES:
20996 NEON_ENCODE (DUP, inst);
20997 if (inst.instruction == N_INV)
20998 {
20999 first_error ("only loads support such operands");
21000 break;
21001 }
21002 do_neon_ld_dup ();
21003 break;
21004
21005 default:
21006 NEON_ENCODE (LANE, inst);
21007 do_neon_ld_st_lane ();
21008 }
21009
21010 /* L bit comes from bit mask. */
21011 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
21012 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
21013 inst.instruction |= inst.operands[1].reg << 16;
21014
21015 if (inst.operands[1].postind)
21016 {
21017 int postreg = inst.operands[1].imm & 0xf;
21018 constraint (!inst.operands[1].immisreg,
21019 _("post-index must be a register"));
21020 constraint (postreg == 0xd || postreg == 0xf,
21021 _("bad register for post-index"));
21022 inst.instruction |= postreg;
21023 }
21024 else
21025 {
21026 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
21027 constraint (inst.relocs[0].exp.X_op != O_constant
21028 || inst.relocs[0].exp.X_add_number != 0,
21029 BAD_ADDR_MODE);
21030
21031 if (inst.operands[1].writeback)
21032 {
21033 inst.instruction |= 0xd;
21034 }
21035 else
21036 inst.instruction |= 0xf;
21037 }
21038
21039 if (thumb_mode)
21040 inst.instruction |= 0xf9000000;
21041 else
21042 inst.instruction |= 0xf4000000;
21043}
21044
21045/* FP v8. */
21046static void
21047do_vfp_nsyn_fpv8 (enum neon_shape rs)
21048{
21049 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
21050 D register operands. */
21051 if (neon_shape_class[rs] == SC_DOUBLE)
21052 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
21053 _(BAD_FPU));
21054
21055 NEON_ENCODE (FPV8, inst);
21056
21057 if (rs == NS_FFF || rs == NS_HHH)
21058 {
21059 do_vfp_sp_dyadic ();
21060
21061 /* ARMv8.2 fp16 instruction. */
21062 if (rs == NS_HHH)
21063 do_scalar_fp16_v82_encode ();
21064 }
21065 else
21066 do_vfp_dp_rd_rn_rm ();
21067
21068 if (rs == NS_DDD)
21069 inst.instruction |= 0x100;
21070
21071 inst.instruction |= 0xf0000000;
21072}
21073
21074static void
21075do_vsel (void)
21076{
21077 set_pred_insn_type (OUTSIDE_PRED_INSN);
21078
21079 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
21080 first_error (_("invalid instruction shape"));
21081}
21082
/* Encode VMAXNM/VMINNM: try the VFP scalar form first, otherwise fall
   through to the Neon/MVE vector form.  */
static void
do_vmaxnm (void)
{
  /* For MVE, predication is handled by the vector path below.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    set_pred_insn_type (OUTSIDE_PRED_INSN);

  /* Scalar (VFP FP v8) operands take priority.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
    return;

  if (!check_simd_pred_availability (TRUE, NEON_CHECK_CC | NEON_CHECK_ARCH8))
    return;

  neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
}
21097
/* Common encoder for the VRINT family.  MODE selects the rounding-mode
   variant (see enum neon_cvt_mode).  Tries the VFP scalar encoding
   first; if the type check fails, falls back to the Neon vector
   encoding.  */
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_pred_insn_type (OUTSIDE_PRED_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      /* The a/n/p/m variants occupy the "unconditional" 0xf space; r/z/x
	 keep the conditional encoding and differ in op bits.  */
      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      if (!check_simd_pred_availability (TRUE,
					 NEON_CHECK_CC | NEON_CHECK_ARCH8))
	return;

      NEON_ENCODE (FLOAT, inst);

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* Rounding mode goes in the "op" field, bits [9:7].  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
21189
/* Encode VRINTX (rounding mode X).  */
static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}

/* Encode VRINTZ (rounding mode Z).  */
static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}

/* Encode VRINTR (rounding mode R; VFP only — see do_vrint_1).  */
static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}

/* Encode VRINTA (rounding mode A).  */
static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}

/* Encode VRINTN (rounding mode N).  */
static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}

/* Encode VRINTP (rounding mode P).  */
static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}

/* Encode VRINTM (rounding mode M).  */
static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
21231
/* Validate and repack a VCMLA scalar operand (OPND, GAS's internal
   reg/index packing) for element size ELSIZE.  Returns the encoded
   value, or reports an error and returns 0 when the register number or
   index is out of range for that size.  */
static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (opnd);
  unsigned elno = NEON_SCALAR_INDEX (opnd);

  switch (elsize)
    {
    case 16:
      /* Half precision: index 0-1, register 0-15; index goes in bit 4.  */
      if (elno < 2 && regno < 16)
	return regno | (elno << 4);
      break;

    case 32:
      /* Single precision: only index 0 is representable.  */
      if (elno == 0)
	return regno;
      break;

    default:
      break;
    }

  first_error (_("scalar out of range"));
  return 0;
}
21246
/* Encode VCMLA (complex multiply-accumulate with rotation), both the
   vector form and the indexed-scalar form.  The rotation (0/90/180/270)
   arrives as a constant expression in relocs[0].  */
static void
do_vcmla (void)
{
  /* Needs MVE-FP, or Neon v8 plus the Armv8.3 complex-number extension.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext)
	      && (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8)
		  || !mark_feature_used (&arm_ext_v8_3)), (BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));
  /* Rotation is encoded as a 2-bit multiple of 90 degrees.  */
  rot /= 90;

  if (!check_simd_pred_availability (TRUE,
				     NEON_CHECK_ARCH8 | NEON_CHECK_CC))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form (not available for MVE).  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	first_error (_("invalid instruction shape"));
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      /* Vector form: MVE only accepts Q registers.  */
      enum neon_shape rs;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	rs = neon_select_shape (NS_QQQI, NS_NULL);
      else
	rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);

      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      /* Overlapping source/destination is only a warning, not an error.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext) && size == 32
	  && (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg))
	as_tsktsk (BAD_MVE_SRCDEST);

      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc200800;
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
21306
/* Encode VCADD (complex add with rotation).  The rotation (90/270)
   arrives as a constant expression in relocs[0].  Handles the Neon
   float form and the MVE float/integer forms.  */
static void
do_vcadd (void)
{
  /* Needs MVE, or Neon v8 plus the Armv8.3 complex-number extension.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	      && (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8)
		  || !mark_feature_used (&arm_ext_v8_3)), (BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));

  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs;
  struct neon_type_el et;
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* Neon: float types, D or Q registers.  */
      rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16 | N_F32);
    }
  else
    {
      /* MVE: Q registers only, but integer element types also allowed.  */
      rs = neon_select_shape (NS_QQQI, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16 | N_F32 | N_I8
			    | N_I16 | N_I32);
      if (et.size == 32 && inst.operands[0].reg == inst.operands[2].reg)
	as_tsktsk (_("Warning: 32-bit element size and same first and third "
		     "operand makes instruction UNPREDICTABLE"));
    }

  if (et.type == NT_invtype)
    return;

  if (!check_simd_pred_availability (et.type == NT_float,
				     NEON_CHECK_ARCH8 | NEON_CHECK_CC))
    return;

  if (et.type == NT_float)
    {
      /* Float form: rotation selects bit 24, element size bit 20.  */
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc800800;
      inst.instruction |= (rot == 270) << 24;
      inst.instruction |= (et.size == 32) << 20;
    }
  else
    {
      /* Integer form is MVE-only.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      inst.instruction = 0xfe000f00;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= neon_logbits (et.size) << 20;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= (rot == 270) << 12;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.is_neon = 1;
    }
}
21365
21366/* Dot Product instructions encoding support. */
21367
/* Encode VSDOT/VUDOT.  UNSIGNED_P is non-zero for the unsigned (VUDOT)
   variant and selects both the element type check and the U bit.  */
static void
do_neon_dotproduct (int unsigned_p)
{
  enum neon_shape rs;
  unsigned scalar_oprd2 = 0;
  int high8;

  /* Conditional execution is architecturally unpredictable here; warn but
     carry on encoding.  */
  if (inst.cond != COND_ALWAYS)
    as_warn (_("Dot Product instructions cannot be conditional,  the behaviour "
	       "is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* Dot Product instructions are in three-same D/Q register format or the third
     operand can be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
      high8 = 0xfe000000;
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
    }

  if (unsigned_p)
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
  else
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);

  /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
     Product instruction, so we pass 0 as the "ubit" parameter.  And the
     "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter.  */
  neon_three_same (neon_quad (rs), 0, 32);

  /* Undo neon_dp_fixup.  Dot Product instructions are using a slightly
     different NEON three-same encoding.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;
  /* Encode 'U' bit which indicates signedness.  */
  inst.instruction |= (unsigned_p ? 1 : 0) << 4;
  /* Re-encode operand2 if it's indexed scalar operand.  What has been encoded
     from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
     the instruction encoding.  */
  if (inst.operands[2].isscalar)
    {
      inst.instruction &= 0xffffffd0;
      inst.instruction |= LOW4 (scalar_oprd2);
      inst.instruction |= HI1 (scalar_oprd2) << 5;
    }
}
21422
21423/* Dot Product instructions for signed integer. */
21424
static void
do_neon_dotproduct_s (void)
{
  /* Plain call: `return <void expression>;' in a void function violates
     C11 6.8.6.4p1 even though GCC tolerates it.  */
  do_neon_dotproduct (0);
}

/* Dot Product instructions for unsigned integer.  */

static void
do_neon_dotproduct_u (void)
{
  do_neon_dotproduct (1);
}
21438
21439/* Crypto v1 instructions. */
21440static void
21441do_crypto_2op_1 (unsigned elttype, int op)
21442{
21443 set_pred_insn_type (OUTSIDE_PRED_INSN);
21444
21445 if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
21446 == NT_invtype)
21447 return;
21448
21449 inst.error = NULL;
21450
21451 NEON_ENCODE (INTEGER, inst);
21452 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
21453 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
21454 inst.instruction |= LOW4 (inst.operands[1].reg);
21455 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
21456 if (op != -1)
21457 inst.instruction |= op << 6;
21458
21459 if (thumb_mode)
21460 inst.instruction |= 0xfc000000;
21461 else
21462 inst.instruction |= 0xf0000000;
21463}
21464
21465static void
21466do_crypto_3op_1 (int u, int op)
21467{
21468 set_pred_insn_type (OUTSIDE_PRED_INSN);
21469
21470 if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
21471 N_32 | N_UNT | N_KEY).type == NT_invtype)
21472 return;
21473
21474 inst.error = NULL;
21475
21476 NEON_ENCODE (INTEGER, inst);
21477 neon_three_same (1, u, 8 << op);
21478}
21479
/* AESE: two-operand crypto, 8-bit elements, op 0.  */
static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

/* AESD: two-operand crypto, 8-bit elements, op 1.  */
static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

/* AESMC: two-operand crypto, 8-bit elements, op 2.  */
static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

/* AESIMC: two-operand crypto, 8-bit elements, op 3.  */
static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}

/* SHA1C: three-operand crypto, u=0, op 0.  */
static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

/* SHA1P: three-operand crypto, u=0, op 1.  */
static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

/* SHA1M: three-operand crypto, u=0, op 2.  */
static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

/* SHA1SU0: three-operand crypto, u=0, op 3.  */
static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

/* SHA256H: three-operand crypto, u=1, op 0.  */
static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

/* SHA256H2: three-operand crypto, u=1, op 1.  */
static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

/* SHA256SU1: three-operand crypto, u=1, op 2.  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}

/* SHA1H: two-operand crypto, 32-bit elements, op field untouched.  */
static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}

/* SHA1SU1: two-operand crypto, 32-bit elements, op 0.  */
static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

/* SHA256SU0: two-operand crypto, 32-bit elements, op 1.  */
static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
21563
21564static void
21565do_crc32_1 (unsigned int poly, unsigned int sz)
21566{
21567 unsigned int Rd = inst.operands[0].reg;
21568 unsigned int Rn = inst.operands[1].reg;
21569 unsigned int Rm = inst.operands[2].reg;
21570
21571 set_pred_insn_type (OUTSIDE_PRED_INSN);
21572 inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
21573 inst.instruction |= LOW4 (Rn) << 16;
21574 inst.instruction |= LOW4 (Rm);
21575 inst.instruction |= sz << (thumb_mode ? 4 : 21);
21576 inst.instruction |= poly << (thumb_mode ? 20 : 9);
21577
21578 if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
21579 as_warn (UNPRED_REG ("r15"));
21580}
21581
/* CRC32B: poly 0, byte.  */
static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}

/* CRC32H: poly 0, halfword.  */
static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}

/* CRC32W: poly 0, word.  */
static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}

/* CRC32CB: poly 1 (Castagnoli), byte.  */
static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}

/* CRC32CH: poly 1 (Castagnoli), halfword.  */
static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}

/* CRC32CW: poly 1 (Castagnoli), word.  */
static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
21617
/* Encode VJCVT: convert double precision (F64) to signed 32-bit (S32).
   Requires the Armv8 VFP extension.  */
static void
do_vjcvt (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
21627
/* Encode VDOT (BFloat16 dot product), both the plain three-register
   form and the indexed-scalar form.  */
static void
do_vdot (void)
{
  enum neon_shape rs;
  constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (inst.operands[2].isscalar)
    {
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);

      inst.instruction |= (1 << 25);
      /* GAS's scalar packing keeps the index in the low nibble and the
	 register number above it; split them back apart.  */
      int index = inst.operands[2].reg & 0xf;
      constraint ((index != 1 && index != 0), _("index must be 0 or 1"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 16),
		  _("indexed register must be less than 16"));
      neon_three_args (rs == NS_QQS);
      inst.instruction |= (index << 5);
    }
  else
    {
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
      neon_three_args (rs == NS_QQQ);
    }
}
21655
/* Encode VMMLA (BFloat16 matrix multiply-accumulate); Q registers only.  */
static void
do_vmmla (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);

  constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  neon_three_args (1);
}
21667
21668\f
21669/* Overall per-instruction processing. */
21670
21671/* We need to be able to fix up arbitrary expressions in some statements.
21672 This is so that we can handle symbols that are an arbitrary distance from
21673 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
21674 which returns part of an address in a form which will be valid for
21675 a data instruction. We do this by pushing the expression into a symbol
21676 in the expr_section, and creating a fix for that. */
21677
/* Create a fix-up for the SIZE bytes at FRAG/WHERE against expression EXP
   with relocation RELOC (PC-relative if PC_REL).  Constant expressions are
   wrapped in an absolute symbol first; anything that is not a plain
   symbol/add/subtract is pushed into an expression symbol.  */
static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  fixS *	   new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  /* 7 chars of "*ABS*0x" + up to 16 hex digits + NUL = 24.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex gets folded into an expression symbol.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
21731
/* Create a frag for an instruction requiring relaxation.  */
/* Emit the current (Thumb) instruction into a relaxable machine-dependent
   frag, splitting inst.relocs[0] into a symbol/offset pair for frag_var.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  switch (inst.relocs[0].exp.X_op)
    {
    case O_symbol:
      sym = inst.relocs[0].exp.X_add_symbol;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    default:
      /* Fold any other expression into a symbol.  */
      sym = make_expr_symbol (&inst.relocs[0].exp);
      offset = 0;
      break;
    }
  /* Reserve the maximum (32-bit) size but start with the 16-bit form;
     md_convert_frag grows it if needed.  */
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
21763
21764/* Write a 32-bit thumb instruction to buf. */
21765static void
21766put_thumb32_insn (char * buf, unsigned long insn)
21767{
21768 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
21769 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
21770}
21771
/* Emit the assembled instruction in `inst' to the current frag, plus any
   fixups and debug info.  STR is the source line, used for diagnostics.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  /* A pending parse/encode error cancels emission.  */
  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  /* Relaxable instructions go through the variant-frag path instead.  */
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  /* Pseudo instructions (e.g. handled directives) emit nothing.  */
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb: two halfwords, high halfword first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* 64-bit ARM encoding: the same 32-bit word written twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  /* Emit a fixup for every pending relocation on this instruction.  */
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    {
      if (inst.relocs[r].type != BFD_RELOC_UNUSED)
	fix_new_arm (frag_now, to - frag_now->fr_literal,
		     inst.size, & inst.relocs[r].exp, inst.relocs[r].pc_rel,
		     inst.relocs[r].type);
    }

  dwarf2_emit_insn (inst.size);
}
21822
21823static char *
21824output_it_inst (int cond, int mask, char * to)
21825{
21826 unsigned long instruction = 0xbf00;
21827
21828 mask &= 0xf;
21829 instruction |= mask;
21830 instruction |= cond << 4;
21831
21832 if (to == NULL)
21833 {
21834 to = frag_more (2);
21835#ifdef OBJ_ELF
21836 dwarf2_emit_insn (2);
21837#endif
21838 }
21839
21840 md_number_to_chars (to, instruction, 2);
21841
21842 return to;
21843}
21844
/* Tag values used in struct asm_opcode's tag field.  They describe how
   a mnemonic interacts with its conditional affix (suffix or infix).  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a scalar
			   conditional suffix, others place 0xF where the
			   condition field would be, others take a vector
			   conditional suffix.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
21879
21880/* Subroutine of md_assemble, responsible for looking up the primary
21881 opcode from the mnemonic the user wrote. STR points to the
21882 beginning of the mnemonic.
21883
21884 This is not simply a hash table lookup, because of conditional
21885 variants. Most instructions have conditional variants, which are
21886 expressed with a _conditional affix_ to the mnemonic. If we were
21887 to encode each conditional variant as a literal string in the opcode
21888 table, it would have approximately 20,000 entries.
21889
21890 Most mnemonics take this affix as a suffix, and in unified syntax,
21891 'most' is upgraded to 'all'. However, in the divided syntax, some
21892 instructions take the affix as an infix, notably the s-variants of
21893 the arithmetic instructions. Of those instructions, all but six
21894 have the infix appear after the third character of the mnemonic.
21895
21896 Accordingly, the algorithm for looking up primary opcodes given
21897 an identifier is:
21898
21899 1. Look up the identifier in the opcode table.
21900 If we find a match, go to step U.
21901
21902 2. Look up the last two characters of the identifier in the
21903 conditions table. If we find a match, look up the first N-2
21904 characters of the identifier in the opcode table. If we
21905 find a match, go to step CE.
21906
21907 3. Look up the fourth and fifth characters of the identifier in
21908 the conditions table. If we find a match, extract those
21909 characters from the identifier, and look up the remaining
21910 characters in the opcode table. If we find a match, go
21911 to step CM.
21912
21913 4. Fail.
21914
21915 U. Examine the tag field of the opcode structure, in case this is
21916 one of the six instructions with its conditional infix in an
21917 unusual place. If it is, the tag tells us where to find the
21918 infix; look it up in the conditions table and set inst.cond
21919 accordingly. Otherwise, this is an unconditional instruction.
21920 Again set inst.cond accordingly. Return the opcode structure.
21921
21922 CE. Examine the tag field to make sure this is an instruction that
21923 should receive a conditional suffix. If it is not, fail.
21924 Otherwise, set inst.cond from the suffix we already looked up,
21925 and return the opcode structure.
21926
21927 CM. Examine the tag field to make sure this is an instruction that
21928 should receive a conditional infix after the third character.
21929 If it is not, fail. Otherwise, undo the edits to the current
21930 line of input and proceed as for case CE. */
21931
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  /* Empty mnemonic: nothing to look up.  */
  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      /* One of the rare mnemonics with a conditional infix in an unusual
	 place; the tag encodes the infix position.  */
      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* Cannot have a one-character vector predication suffix on a mnemonic
	 of less than two characters.  */
      if (end - base < 2)
	return NULL;
      affix = end - 1;
      cond = (const struct asm_cond *) hash_find_n (arm_vcond_hsh, affix, 1);
      opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
							affix - base);
      /* If this opcode can not be vector predicated then don't accept it with a
	 vector predication code.  */
      if (opcode && !opcode->mayBeVecPred)
	opcode = NULL;
    }
  /* NOTE: when the MVE branch above is skipped, OPCODE is NULL here (the
     unaffixed lookup failed), so COND is never read uninitialized.  */
  if (!opcode || !cond)
    {
      /* Cannot have a conditional suffix on a mnemonic of less than two
	 characters.  */
      if (end - base < 3)
	return NULL;

      /* Look for suffixed mnemonic.  */
      affix = end - 2;
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
							affix - base);
    }

  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return NULL;
	  /* Fall through.  */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily excise the two infix characters from the input line,
     look up the remainder, then restore the line exactly as it was.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
22106
22107/* This function generates an initial IT instruction, leaving its block
22108 virtually open for the new instructions. Eventually,
22109 the mask will be updated by now_pred_add_mask () each time
22110 a new instruction needs to be included in the IT block.
22111 Finally, the block is closed with close_automatic_it_block ().
22112 The block closure can be requested either from md_assemble (),
22113 a tencode (), or due to a label hook. */
22114
22115static void
22116new_automatic_it_block (int cond)
22117{
22118 now_pred.state = AUTOMATIC_PRED_BLOCK;
22119 now_pred.mask = 0x18;
22120 now_pred.cc = cond;
22121 now_pred.block_length = 1;
22122 mapping_state (MAP_THUMB);
22123 now_pred.insn = output_it_inst (cond, now_pred.mask, NULL);
22124 now_pred.warn_deprecated = FALSE;
22125 now_pred.insn_cond = TRUE;
22126}
22127
22128/* Close an automatic IT block.
22129 See comments in new_automatic_it_block (). */
22130
22131static void
22132close_automatic_it_block (void)
22133{
22134 now_pred.mask = 0x10;
22135 now_pred.block_length = 0;
22136}
22137
22138/* Update the mask of the current automatically-generated IT
22139 instruction. See comments in new_automatic_it_block (). */
22140
22141static void
22142now_pred_add_mask (int cond)
22143{
22144#define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
22145#define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
22146 | ((bitvalue) << (nbit)))
22147 const int resulting_bit = (cond & 1);
22148
22149 now_pred.mask &= 0xf;
22150 now_pred.mask = SET_BIT_VALUE (now_pred.mask,
22151 resulting_bit,
22152 (5 - now_pred.block_length));
22153 now_pred.mask = SET_BIT_VALUE (now_pred.mask,
22154 1,
22155 ((5 - now_pred.block_length) - 1));
22156 output_it_inst (now_pred.cc, now_pred.mask, now_pred.insn);
22157
22158#undef CLEAR_BIT
22159#undef SET_BIT_VALUE
22160}
22161
/* The IT blocks handling machinery is accessed through these functions:
22163 it_fsm_pre_encode () from md_assemble ()
22164 set_pred_insn_type () optional, from the tencode functions
22165 set_pred_insn_type_last () ditto
22166 in_pred_block () ditto
22167 it_fsm_post_encode () from md_assemble ()
22168 force_automatic_it_block_close () from label handling functions
22169
22170 Rationale:
22171 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
22172 initializing the IT insn type with a generic initial value depending
22173 on the inst.condition.
22174 2) During the tencode function, two things may happen:
22175 a) The tencode function overrides the IT insn type by
22176 calling either set_pred_insn_type (type) or
22177 set_pred_insn_type_last ().
22178 b) The tencode function queries the IT block state by
22179 calling in_pred_block () (i.e. to determine narrow/not narrow mode).
22180
22181 Both set_pred_insn_type and in_pred_block run the internal FSM state
22182 handling function (handle_pred_state), because: a) setting the IT insn
22183 type may incur in an invalid state (exiting the function),
22184 and b) querying the state requires the FSM to be updated.
22185 Specifically we want to avoid creating an IT block for conditional
22186 branches, so it_fsm_pre_encode is actually a guess and we can't
22187 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
22189 Because of this, if set_pred_insn_type and in_pred_block have to be
22190 used, set_pred_insn_type has to be called first.
22191
22192 set_pred_insn_type_last () is a wrapper of set_pred_insn_type (type),
22193 that determines the insn IT type depending on the inst.cond code.
22194 When a tencode () routine encodes an instruction that can be
22195 either outside an IT block, or, in the case of being inside, has to be
22196 the last one, set_pred_insn_type_last () will determine the proper
22197 IT instruction type based on the inst.cond code. Otherwise,
22198 set_pred_insn_type can be called for overriding that logic or
22199 for covering other cases.
22200
22201 Calling handle_pred_state () may not transition the IT block state to
22202 OUTSIDE_PRED_BLOCK immediately, since the (current) state could be
22203 still queried. Instead, if the FSM determines that the state should
22204 be transitioned to OUTSIDE_PRED_BLOCK, a flag is marked to be closed
22205 after the tencode () function: that's what it_fsm_post_encode () does.
22206
22207 Since in_pred_block () calls the state handling function to get an
22208 updated state, an error may occur (due to invalid insns combination).
22209 In that case, inst.error is set.
22210 Therefore, inst.error has to be checked after the execution of
22211 the tencode () routine.
22212
22213 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
22214 any pending state change (if any) that didn't take place in
22215 handle_pred_state () as explained above. */
22216
22217static void
22218it_fsm_pre_encode (void)
22219{
22220 if (inst.cond != COND_ALWAYS)
22221 inst.pred_insn_type = INSIDE_IT_INSN;
22222 else
22223 inst.pred_insn_type = OUTSIDE_PRED_INSN;
22224
22225 now_pred.state_handled = 0;
22226}
22227
22228/* IT state FSM handling function. */
22229/* MVE instructions and non-MVE instructions are handled differently because of
22230 the introduction of VPT blocks.
22231 Specifications say that any non-MVE instruction inside a VPT block is
22232 UNPREDICTABLE, with the exception of the BKPT instruction. Whereas most MVE
22233 instructions are deemed to be UNPREDICTABLE if inside an IT block. For the
22234 few exceptions we have MVE_UNPREDICABLE_INSN.
22235 The error messages provided depending on the different combinations possible
22236 are described in the cases below:
22237 For 'most' MVE instructions:
22238 1) In an IT block, with an IT code: syntax error
22239 2) In an IT block, with a VPT code: error: must be in a VPT block
22240 3) In an IT block, with no code: warning: UNPREDICTABLE
22241 4) In a VPT block, with an IT code: syntax error
22242 5) In a VPT block, with a VPT code: OK!
22243 6) In a VPT block, with no code: error: missing code
22244 7) Outside a pred block, with an IT code: error: syntax error
22245 8) Outside a pred block, with a VPT code: error: should be in a VPT block
22246 9) Outside a pred block, with no code: OK!
22247 For non-MVE instructions:
22248 10) In an IT block, with an IT code: OK!
22249 11) In an IT block, with a VPT code: syntax error
22250 12) In an IT block, with no code: error: missing code
22251 13) In a VPT block, with an IT code: error: should be in an IT block
22252 14) In a VPT block, with a VPT code: syntax error
22253 15) In a VPT block, with no code: UNPREDICTABLE
22254 16) Outside a pred block, with an IT code: error: should be in an IT block
22255 17) Outside a pred block, with a VPT code: syntax error
22256 18) Outside a pred block, with no code: OK!
22257 */
22258
22259
/* Run one step of the IT/VPT predication FSM for the instruction just
   parsed, using now_pred.state and inst.pred_insn_type.  Returns SUCCESS,
   or FAIL with inst.error set.  The numbered cases refer to the table in
   the comment above.  */
static int
handle_pred_state (void)
{
  now_pred.state_handled = 1;
  now_pred.insn_cond = FALSE;

  switch (now_pred.state)
    {
    case OUTSIDE_PRED_BLOCK:
      switch (inst.pred_insn_type)
	{
	case MVE_UNPREDICABLE_INSN:
	case MVE_OUTSIDE_PRED_INSN:
	  if (inst.cond < COND_ALWAYS)
	    {
	      /* Case 7: Outside a pred block, with an IT code: error: syntax
		 error.  */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  /* Case 9:  Outside a pred block, with no code: OK! */
	  break;
	case OUTSIDE_PRED_INSN:
	  if (inst.cond > COND_ALWAYS)
	    {
	      /* Case 17:  Outside a pred block, with a VPT code: syntax error.
	       */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  /* Case 18: Outside a pred block, with no code: OK! */
	  break;

	case INSIDE_VPT_INSN:
	  /* Case 8: Outside a pred block, with a VPT code: error: should be in
	     a VPT block.  */
	  inst.error = BAD_OUT_VPT;
	  return FAIL;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (inst.cond < COND_ALWAYS)
	    {
	      /* Case 16: Outside a pred block, with an IT code: error: should
		 be in an IT block.  */
	      if (thumb_mode == 0)
		{
		  if (unified_syntax
		      && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		    as_tsktsk (_("Warning: conditional outside an IT block"\
				 " for Thumb."));
		}
	      else
		{
		  if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      /* Automatically generate the IT instruction.  */
		      new_automatic_it_block (inst.cond);
		      if (inst.pred_insn_type == INSIDE_IT_LAST_INSN)
			close_automatic_it_block ();
		    }
		  else
		    {
		      inst.error = BAD_OUT_IT;
		      return FAIL;
		    }
		}
	      break;
	    }
	  else if (inst.cond > COND_ALWAYS)
	    {
	      /* Case 17: Outside a pred block, with a VPT code: syntax error.
	       */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  else
	    gas_assert (0);
	  /* Fall through — only reachable when asserts are disabled.  */
	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case VPT_INSN:
	  if (inst.cond != COND_ALWAYS)
	    first_error (BAD_SYNTAX);
	  now_pred.state = MANUAL_PRED_BLOCK;
	  now_pred.block_length = 0;
	  now_pred.type = VECTOR_PRED;
	  now_pred.cc = 0;
	  break;
	case IT_INSN:
	  now_pred.state = MANUAL_PRED_BLOCK;
	  now_pred.block_length = 0;
	  now_pred.type = SCALAR_PRED;
	  break;
	}
      break;

    case AUTOMATIC_PRED_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.pred_insn_type)
	{
	case INSIDE_VPT_INSN:
	case VPT_INSN:
	case MVE_UNPREDICABLE_INSN:
	case MVE_OUTSIDE_PRED_INSN:
	  gas_assert (0);
	  /* Fall through — only reachable when asserts are disabled.  */
	case OUTSIDE_PRED_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_pred_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_pred.block_length++;

	  if (now_pred.block_length > 4
	      || !now_pred_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.pred_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_pred.insn_cond = TRUE;
	      now_pred_add_mask (inst.cond);
	    }

	  /* Close the block now if this was declared its last insn and the
	     block wasn't already closed/restarted above.  */
	  if (now_pred.state == AUTOMATIC_PRED_BLOCK
	      && (inst.pred_insn_type == INSIDE_IT_LAST_INSN
		  || inst.pred_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_pred.block_length++;
	  now_pred.insn_cond = TRUE;

	  if (now_pred.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_pred_add_mask (now_pred.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_pred.state = MANUAL_PRED_BLOCK;
	  break;
	}
      break;

    case MANUAL_PRED_BLOCK:
      {
	int cond, is_last;
	if (now_pred.type == SCALAR_PRED)
	  {
	    /* Check conditional suffixes.  */
	    cond = now_pred.cc ^ ((now_pred.mask >> 4) & 1) ^ 1;
	    now_pred.mask <<= 1;
	    now_pred.mask &= 0x1f;
	    is_last = (now_pred.mask == 0x10);
	  }
	else
	  {
	    now_pred.cc ^= (now_pred.mask >> 4);
	    cond = now_pred.cc + 0xf;
	    now_pred.mask <<= 1;
	    now_pred.mask &= 0x1f;
	    is_last = now_pred.mask == 0x10;
	  }
	now_pred.insn_cond = TRUE;

	switch (inst.pred_insn_type)
	  {
	  case OUTSIDE_PRED_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 12: In an IT block, with no code: error: missing
		       code.  */
		    inst.error = BAD_NOT_IT;
		    return FAIL;
		  }
		else if (inst.cond > COND_ALWAYS)
		  {
		    /* Case 11: In an IT block, with a VPT code: syntax error.
		     */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else if (thumb_mode)
		  {
		    /* This is for some special cases where a non-MVE
		       instruction is not allowed in an IT block, such as cbz,
		       but are put into one with a condition code.
		       You could argue this should be a syntax error, but we
		       gave the 'not allowed in IT block' diagnostic in the
		       past so we will keep doing so.  */
		    inst.error = BAD_NOT_IT;
		    return FAIL;
		  }
		break;
	      }
	    else
	      {
		/* Case 15: In a VPT block, with no code: UNPREDICTABLE.  */
		as_tsktsk (MVE_NOT_VPT);
		return SUCCESS;
	      }
	  case MVE_OUTSIDE_PRED_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 3: In an IT block, with no code: warning:
		       UNPREDICTABLE.  */
		    as_tsktsk (MVE_NOT_IT);
		    return SUCCESS;
		  }
		else if (inst.cond < COND_ALWAYS)
		  {
		    /* Case 1: In an IT block, with an IT code: syntax error.
		     */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else
		  gas_assert (0);
	      }
	    else
	      {
		if (inst.cond < COND_ALWAYS)
		  {
		    /* Case 4: In a VPT block, with an IT code: syntax error.
		     */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 6: In a VPT block, with no code: error: missing
		       code.  */
		    inst.error = BAD_NOT_VPT;
		    return FAIL;
		  }
		else
		  {
		    gas_assert (0);
		  }
	      }
	  case MVE_UNPREDICABLE_INSN:
	    as_tsktsk (now_pred.type == SCALAR_PRED ? MVE_NOT_IT : MVE_NOT_VPT);
	    return SUCCESS;
	  case INSIDE_IT_INSN:
	    if (inst.cond > COND_ALWAYS)
	      {
		/* Case 11: In an IT block, with a VPT code: syntax error.  */
		/* Case 14: In a VPT block, with a VPT code: syntax error.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	    else if (now_pred.type == SCALAR_PRED)
	      {
		/* Case 10: In an IT block, with an IT code: OK! */
		if (cond != inst.cond)
		  {
		    inst.error = now_pred.type == SCALAR_PRED ? BAD_IT_COND :
		      BAD_VPT_COND;
		    return FAIL;
		  }
	      }
	    else
	      {
		/* Case 13: In a VPT block, with an IT code: error: should be
		   in an IT block.  */
		inst.error = BAD_OUT_IT;
		return FAIL;
	      }
	    break;

	  case INSIDE_VPT_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		/* Case 2: In an IT block, with a VPT code: error: must be in a
		   VPT block.  */
		inst.error = BAD_OUT_VPT;
		return FAIL;
	      }
	    /* Case 5:  In a VPT block, with a VPT code: OK! */
	    else if (cond != inst.cond)
	      {
		inst.error = BAD_VPT_COND;
		return FAIL;
	      }
	    break;
	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (now_pred.type == VECTOR_PRED || inst.cond > COND_ALWAYS)
	      {
		/* Case 4: In a VPT block, with an IT code: syntax error.  */
		/* Case 11: In an IT block, with a VPT code: syntax error.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	    else if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in a IT or VPT
	       block.  */
	    break;

	  case IT_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		inst.error = BAD_IT_IT;
		return FAIL;
	      }
	    /* fall through.  */
	  case VPT_INSN:
	    if (inst.cond == COND_ALWAYS)
	      {
		/* Executing a VPT/VPST instruction inside an IT block or a
		   VPT/VPST/IT instruction inside a VPT block is UNPREDICTABLE.
		 */
		if (now_pred.type == SCALAR_PRED)
		  as_tsktsk (MVE_NOT_IT);
		else
		  as_tsktsk (MVE_NOT_VPT);
		return SUCCESS;
	      }
	    else
	      {
		/* VPT/VPST do not accept condition codes.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	  }
      }
      break;
    }

  return SUCCESS;
}
22624
/* A 16-bit Thumb encoding pattern plus the text used when diagnosing a
   deprecated use of it inside an IT block.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Bits the encoding must match.  */
  unsigned long mask;		/* Which bits of the encoding to compare.  */
  const char* description;	/* Text for the deprecation diagnostic.  */
};
22631
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Scanned by it_fsm_post_encode (); terminated by a zero mask.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode. 'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
22646
22647static void
22648it_fsm_post_encode (void)
22649{
22650 int is_last;
22651
22652 if (!now_pred.state_handled)
22653 handle_pred_state ();
22654
22655 if (now_pred.insn_cond
22656 && !now_pred.warn_deprecated
22657 && warn_on_deprecated
22658 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
22659 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
22660 {
22661 if (inst.instruction >= 0x10000)
22662 {
22663 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
22664 "performance deprecated in ARMv8-A and ARMv8-R"));
22665 now_pred.warn_deprecated = TRUE;
22666 }
22667 else
22668 {
22669 const struct depr_insn_mask *p = depr_it_insns;
22670
22671 while (p->mask != 0)
22672 {
22673 if ((inst.instruction & p->mask) == p->pattern)
22674 {
22675 as_tsktsk (_("IT blocks containing 16-bit Thumb "
22676 "instructions of the following class are "
22677 "performance deprecated in ARMv8-A and "
22678 "ARMv8-R: %s"), p->description);
22679 now_pred.warn_deprecated = TRUE;
22680 break;
22681 }
22682
22683 ++p;
22684 }
22685 }
22686
22687 if (now_pred.block_length > 1)
22688 {
22689 as_tsktsk (_("IT blocks containing more than one conditional "
22690 "instruction are performance deprecated in ARMv8-A and "
22691 "ARMv8-R"));
22692 now_pred.warn_deprecated = TRUE;
22693 }
22694 }
22695
22696 is_last = (now_pred.mask == 0x10);
22697 if (is_last)
22698 {
22699 now_pred.state = OUTSIDE_PRED_BLOCK;
22700 now_pred.mask = 0;
22701 }
22702}
22703
22704static void
22705force_automatic_it_block_close (void)
22706{
22707 if (now_pred.state == AUTOMATIC_PRED_BLOCK)
22708 {
22709 close_automatic_it_block ();
22710 now_pred.state = OUTSIDE_PRED_BLOCK;
22711 now_pred.mask = 0;
22712 }
22713}
22714
22715static int
22716in_pred_block (void)
22717{
22718 if (!now_pred.state_handled)
22719 handle_pred_state ();
22720
22721 return now_pred.state != OUTSIDE_PRED_BLOCK;
22722}
22723
22724/* Whether OPCODE only has T32 encoding. Since this function is only used by
22725 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
22726 here, hence the "known" in the function name. */
22727
22728static bfd_boolean
22729known_t32_only_insn (const struct asm_opcode *opcode)
22730{
22731 /* Original Thumb-1 wide instruction. */
22732 if (opcode->tencode == do_t_blx
22733 || opcode->tencode == do_t_branch23
22734 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
22735 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
22736 return TRUE;
22737
22738 /* Wide-only instruction added to ARMv8-M Baseline. */
22739 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
22740 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
22741 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
22742 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
22743 return TRUE;
22744
22745 return FALSE;
22746}
22747
22748/* Whether wide instruction variant can be used if available for a valid OPCODE
22749 in ARCH. */
22750
22751static bfd_boolean
22752t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
22753{
22754 if (known_t32_only_insn (opcode))
22755 return TRUE;
22756
22757 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
22758 of variant T3 of B.W is checked in do_t_branch. */
22759 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
22760 && opcode->tencode == do_t_branch)
22761 return TRUE;
22762
22763 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
22764 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
22765 && opcode->tencode == do_t_mov_cmp
22766 /* Make sure CMP instruction is not affected. */
22767 && opcode->aencode == do_mov)
22768 return TRUE;
22769
22770 /* Wide instruction variants of all instructions with narrow *and* wide
22771 variants become available with ARMv6t2. Other opcodes are either
22772 narrow-only or wide-only and are thus available if OPCODE is valid. */
22773 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
22774 return TRUE;
22775
22776 /* OPCODE with narrow only instruction variant or wide variant not
22777 available. */
22778 return FALSE;
22779}
22780
/* Assemble one machine-dependent instruction.  STR points at the full
   instruction text; on success the encoded bytes are emitted via
   output_inst.  All state is communicated through the global `inst'
   structure, which is reset at the top of every call.  */

void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Start from a clean slate: no encoding, no error, no relocations.  */
  memset (&inst, '\0', sizeof (inst));
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    inst.relocs[r].type = BFD_RELOC_UNUSED;

  /* Look the mnemonic up; on return P points past the mnemonic, at the
     operands.  */
  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the pred_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 0xe800..0xffff is the range of 32-bit leading halfwords; a
	     finished encoding must be entirely below or above it.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
22975
22976static void
22977check_pred_blocks_finished (void)
22978{
22979#ifdef OBJ_ELF
22980 asection *sect;
22981
22982 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
22983 if (seg_info (sect)->tc_segment_info_data.current_pred.state
22984 == MANUAL_PRED_BLOCK)
22985 {
22986 if (now_pred.type == SCALAR_PRED)
22987 as_warn (_("section '%s' finished with an open IT block."),
22988 sect->name);
22989 else
22990 as_warn (_("section '%s' finished with an open VPT/VPST block."),
22991 sect->name);
22992 }
22993#else
22994 if (now_pred.state == MANUAL_PRED_BLOCK)
22995 {
22996 if (now_pred.type == SCALAR_PRED)
22997 as_warn (_("file finished with an open IT block."));
22998 else
22999 as_warn (_("file finished with an open VPT/VPST block."));
23000 }
23001#endif
23002}
23003
23004/* Various frobbings of labels and their addresses. */
23005
/* Hook run at the start of each new input line: forget the label seen
   on the previous line so that md_assemble only realigns a label that
   immediately precedes the instruction being assembled.  */

void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
23011
/* Process a newly defined label SYM: remember it for md_assemble's
   label realignment, tag it with the current Thumb/ARM and interwork
   state, close any automatic IT block, possibly mark it as a Thumb
   function entry point, and emit DWARF line info for it.  */

void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label ends any IT block that was opened implicitly.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_section_flags (now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
23070
23071bfd_boolean
23072arm_data_in_code (void)
23073{
23074 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
23075 {
23076 *input_line_pointer = '/';
23077 input_line_pointer += 5;
23078 *input_line_pointer = 0;
23079 return TRUE;
23080 }
23081
23082 return FALSE;
23083}
23084
23085char *
23086arm_canonicalize_symbol_name (char * name)
23087{
23088 int len;
23089
23090 if (thumb_mode && (len = strlen (name)) > 5
23091 && streq (name + len - 5, "/data"))
23092 *(name + len - 5) = 0;
23093
23094 return name;
23095}
23096\f
23097/* Table of all register names defined by default. The user can
23098 define additional names with .req. Note that all register names
23099 should appear in both upper and lowercase variants. Some registers
23100 also have mixed-case names. */
23101
/* One register-table entry: name S (stringified), number N, type
   REG_TYPE_T, builtin = TRUE, neon shape info zeroed.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* Entry for register P##N numbered N (e.g. r5 -> 5).  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* Entry for register P##N numbered 2*N (Q registers map onto D pairs).  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* Registers P0..P15 of type T.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* Registers P16..P31 of type T (upper half of a 32-entry bank).  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* Registers P0..P15 numbered 0,2,4,...,30.  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* Banked LR/SP/SPSR triple (both cases) for mode BANK at encoding BASE.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
23127
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Defining the new Zero register from ARMv8.1-M.  */
  REGDEF(zr,15,ZR),
  REGDEF(ZR,15,ZR),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  NOTE(review): the 512|(n<<16) / 768|(n<<16)
     values appear to pack a "banked" flag and the SYSm-style selector
     into one number, with SPSR_BIT marking SPSR views -- confirm against
     the reg_entry consumers before relying on this.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
  REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),
  REGDEF(fpscr_nzcvqc,2,VFC), REGDEF(FPSCR_nzcvqc,2,VFC),
  REGDEF(vpr,12,VFC), REGDEF(VPR,12,VFC),
  REGDEF(fpcxt_ns,14,VFC), REGDEF(FPCXT_NS,14,VFC),
  REGDEF(fpcxt_s,15,VFC), REGDEF(FPCXT_S,15,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
/* Only the three most generic helpers are retired here; REGNUM2,
   REGSETH, REGSET2 and SPLRBANK remain defined below this point.  */
#undef REGDEF
#undef REGNUM
#undef REGSET
23254
23255/* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
23256 within psr_required_here. */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  Every permutation of each 2-, 3- and
     4-element subset of {f,s,x,c} is listed so that any spelling
     order the user writes is accepted.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
23333
23334/* Table of V7M psr names. */
/* Table of V7M psr names.  NOTE(review): the values look like the
   MRS/MSR special-register encodings, with the 0x80/0x90 block being
   the "_ns" (non-secure) views -- confirm against the MRS/MSR
   encoders before changing any value.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	   0x0 }, {"APSR",	   0x0 },
  {"iapsr",	   0x1 }, {"IAPSR",	   0x1 },
  {"eapsr",	   0x2 }, {"EAPSR",	   0x2 },
  {"psr",	   0x3 }, {"PSR",	   0x3 },
  {"xpsr",	   0x3 }, {"XPSR",	   0x3 }, {"xPSR",	  3 },
  {"ipsr",	   0x5 }, {"IPSR",	   0x5 },
  {"epsr",	   0x6 }, {"EPSR",	   0x6 },
  {"iepsr",	   0x7 }, {"IEPSR",	   0x7 },
  {"msp",	   0x8 }, {"MSP",	   0x8 },
  {"psp",	   0x9 }, {"PSP",	   0x9 },
  {"msplim",	   0xa }, {"MSPLIM",	   0xa },
  {"psplim",	   0xb }, {"PSPLIM",	   0xb },
  {"primask",	   0x10}, {"PRIMASK",	   0x10},
  {"basepri",	   0x11}, {"BASEPRI",	   0x11},
  {"basepri_max",  0x12}, {"BASEPRI_MAX",  0x12},
  {"faultmask",	   0x13}, {"FAULTMASK",	   0x13},
  {"control",	   0x14}, {"CONTROL",	   0x14},
  {"msp_ns",	   0x88}, {"MSP_NS",	   0x88},
  {"psp_ns",	   0x89}, {"PSP_NS",	   0x89},
  {"msplim_ns",	   0x8a}, {"MSPLIM_NS",	   0x8a},
  {"psplim_ns",	   0x8b}, {"PSPLIM_NS",	   0x8b},
  {"primask_ns",   0x90}, {"PRIMASK_NS",   0x90},
  {"basepri_ns",   0x91}, {"BASEPRI_NS",   0x91},
  {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
  {"control_ns",   0x94}, {"CONTROL_NS",   0x94},
  {"sp_ns",	   0x98}, {"SP_NS",	   0x98 }
};
23364
23365/* Table of all shift-in-operand names. */
/* Table of all shift-in-operand names, in both cases.  Note that "asl"
   is accepted as a synonym for "lsl".  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX },
  { "uxtw", SHIFT_UXTW}, { "UXTW", SHIFT_UXTW}
};
23376
23377/* Table of all explicit relocation names. */
23378#ifdef OBJ_ELF
23379static struct reloc_entry reloc_names[] =
23380{
23381 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
23382 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
23383 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
23384 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
23385 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
23386 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
23387 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
23388 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
23389 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
23390 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
23391 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32},
23392 { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
23393 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
23394 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
23395 { "tlscall", BFD_RELOC_ARM_TLS_CALL},
23396 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
23397 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
23398 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ},
23399 { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC },
23400 { "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC },
23401 { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC },
23402 { "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC },
23403 { "funcdesc", BFD_RELOC_ARM_FUNCDESC },
23404 { "FUNCDESC", BFD_RELOC_ARM_FUNCDESC },
23405 { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC }, { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC },
23406 { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC }, { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
23407 { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC }, { "GOTTPOFF_FDIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
23408};
23409#endif
23410
23411/* Table of all conditional affixes. */
/* Table of all conditional affixes, mapping each accepted spelling to
   the 4-bit ARM condition-code value.  "hs" is a synonym for "cs";
   "ul" and "lo" are synonyms for "cc".  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
/* Vector-predication affixes ("t"/"e"), using values above the 4-bit
   condition-code range so they cannot collide with entries in conds.  */
static const struct asm_cond vconds[] =
{
    {"t", 0xf},
    {"e", 0x10}
};
23435
/* Emit two table entries, lower-case L and upper-case U spelling, both
   mapping to barrier option CODE and requiring core feature FEAT.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Table of DSB/DMB/ISB barrier option names.  The "ld" variants are
   ARMv8-only; the rest require the barrier extension.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
23461
23462/* Table of ARM-format instructions. */
23463
23464/* Macros for gluing together operand strings. N.B. In all cases
23465 other than OPS0, the trailing OP_stop comes from default
23466 zero-initialization of the unspecified elements of the array. */
23467#define OPS0() { OP_stop, }
23468#define OPS1(a) { OP_##a, }
23469#define OPS2(a,b) { OP_##a,OP_##b, }
23470#define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
23471#define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
23472#define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
23473#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
23474
23475/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
23476 This is useful when mixing operands for ARM and THUMB, i.e. using the
23477 MIX_ARM_THUMB_OPERANDS macro.
23478 In order to use these macros, prefix the number of operands with _
23479 e.g. _3. */
23480#define OPS_1(a) { a, }
23481#define OPS_2(a,b) { a,b, }
23482#define OPS_3(a,b,c) { a,b,c, }
23483#define OPS_4(a,b,c,d) { a,b,c,d, }
23484#define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
23485#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
23486
23487/* These macros abstract out the exact format of the mnemonic table and
23488 save some repeated characters. */
23489
23490/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
23491#define TxCE(mnem, op, top, nops, ops, ae, te) \
23492 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
23493 THUMB_VARIANT, do_##ae, do_##te, 0 }
23494
23495/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
23496 a T_MNEM_xyz enumerator. */
23497#define TCE(mnem, aop, top, nops, ops, ae, te) \
23498 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
23499#define tCE(mnem, aop, top, nops, ops, ae, te) \
23500 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
23501
23502/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
23503 infix after the third character. */
23504#define TxC3(mnem, op, top, nops, ops, ae, te) \
23505 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
23506 THUMB_VARIANT, do_##ae, do_##te, 0 }
23507#define TxC3w(mnem, op, top, nops, ops, ae, te) \
23508 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
23509 THUMB_VARIANT, do_##ae, do_##te, 0 }
23510#define TC3(mnem, aop, top, nops, ops, ae, te) \
23511 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
23512#define TC3w(mnem, aop, top, nops, ops, ae, te) \
23513 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
23514#define tC3(mnem, aop, top, nops, ops, ae, te) \
23515 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
23516#define tC3w(mnem, aop, top, nops, ops, ae, te) \
23517 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
23518
23519/* Mnemonic that cannot be conditionalized. The ARM condition-code
23520 field is still 0xE. Many of the Thumb variants can be executed
23521 conditionally, so this is checked separately. */
23522#define TUE(mnem, op, top, nops, ops, ae, te) \
23523 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
23524 THUMB_VARIANT, do_##ae, do_##te, 0 }
23525
23526/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
23527 Used by mnemonics that have very minimal differences in the encoding for
23528 ARM and Thumb variants and can be handled in a common function. */
23529#define TUEc(mnem, op, top, nops, ops, en) \
23530 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
23531 THUMB_VARIANT, do_##en, do_##en, 0 }
23532
23533/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
23534 condition code field. */
23535#define TUF(mnem, op, top, nops, ops, ae, te) \
23536 { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
23537 THUMB_VARIANT, do_##ae, do_##te, 0 }
23538
23539/* ARM-only variants of all the above. */
23540#define CE(mnem, op, nops, ops, ae) \
23541 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
23542
23543#define C3(mnem, op, nops, ops, ae) \
23544 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
23545
23546/* Thumb-only variants of TCE and TUE. */
23547#define ToC(mnem, top, nops, ops, te) \
23548 { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
23549 do_##te, 0 }
23550
23551#define ToU(mnem, top, nops, ops, te) \
23552 { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
23553 NULL, do_##te, 0 }
23554
23555/* T_MNEM_xyz enumerator variants of ToC. */
23556#define toC(mnem, top, nops, ops, te) \
23557 { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
23558 do_##te, 0 }
23559
23560/* T_MNEM_xyz enumerator variants of ToU. */
23561#define toU(mnem, top, nops, ops, te) \
23562 { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
23563 NULL, do_##te, 0 }
23564
23565/* Legacy mnemonics that always have conditional infix after the third
23566 character. */
23567#define CL(mnem, op, nops, ops, ae) \
23568 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
23569 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
23570
23571/* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
23572#define cCE(mnem, op, nops, ops, ae) \
23573 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }
23574
23575/* mov instructions that are shared between coprocessor and MVE. */
23576#define mcCE(mnem, op, nops, ops, ae) \
23577 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##ae, 0 }
23578
23579/* Legacy coprocessor instructions where conditional infix and conditional
23580 suffix are ambiguous. For consistency this includes all FPA instructions,
23581 not just the potentially ambiguous ones. */
23582#define cCL(mnem, op, nops, ops, ae) \
23583 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
23584 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }
23585
23586/* Coprocessor, takes either a suffix or a position-3 infix
23587 (for an FPA corner case). */
23588#define C3E(mnem, op, nops, ops, ae) \
23589 { mnem, OPS##nops ops, OT_csuf_or_in3, \
23590 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }
23591
/* Helper for CM below: build one table entry for mnemonic m1<m2>m3, where
   m2 is the condition infix.  When m2 is empty (sizeof (#m2) == 1, i.e.
   only the NUL) the entry is the bare, unconditional spelling; otherwise
   the tag records the infix position as an offset from OT_odd_infix_0.
   The condition bits themselves are resolved at parse time, so the opcode
   value here is just the base encoding.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Expand to one xCM_ entry per condition infix: the bare form plus every
   condition-code spelling, including the synonyms hs/cs, lo/cc and the
   legacy "ul" spelling (presumably "unsigned lower" — kept for backward
   compatibility; TODO confirm against the parser's condition table).  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1,   , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)
23617
/* Unconditional ARM-only instruction (no Thumb encoding or variant).
   Callers pass a bare token, stringified here.  */
#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* As UE, but tagged OT_unconditionalF (unconditional, F-suffix form
   tolerated by the parser).  */
#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
23623
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes: both opcode slots hold an
   N_MNEM_xyz enumerator rather than a literal encoding.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }
23636
/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  "tag" selects the operand-suffix style and "mve_p" marks the
   entry as usable under MVE predication; the NCE/NCEF wrappers below fix
   those for the common cases.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag, mve_p) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc, mve_p }

/* Conditional-suffix Neon entry, not MVE-predicable.  */
#define NCE(mnem, op, nops, ops, enc) \
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

/* As NCE but with the F-suffix-tolerant tag.  */
#define NCEF(mnem, op, nops, ops, enc) \
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* Neon insn with conditional suffix for the ARM version, overloaded types:
   both opcode slots hold an N_MNEM_xyz enumerator.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag, mve_p) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, mve_p }

/* Overloaded-type conditional-suffix entry, not MVE-predicable.  */
#define nCE(mnem, op, nops, ops, enc) \
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

/* As nCE but with the F-suffix-tolerant tag.  */
#define nCEF(mnem, op, nops, ops, enc) \
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)
23659
/* MVE-predicable entry with conditional F-tolerant suffix whose opcode
   slots hold an M_MNEM_xyz enumerator (MVE overloaded opcodes).  */
#define mCEF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_csuffixF, M_MNEM##op, M_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }
23664
23665
/* nCEF but for MVE predicated instructions.  */
#define mnCEF(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)

/* nCE but for MVE predicated instructions.  */
#define mnCE(mnem, op, nops, ops, enc) \
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NUF but for potentially MVE predicated instructions.  */
#define MNUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* nUF but for potentially MVE predicated instructions.  */
#define mnUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* ToC but for potentially MVE predicated instructions.  Note the Thumb
   opcode here is a literal hex value (0x##top), not a T_MNEM enumerator,
   and "mnem" is already a string literal at the call sites.  */
#define mToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 1 }

/* NCE but for MVE predicated instructions.  */
#define MNCE(mnem, op, nops, ops, enc) \
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NCEF but for MVE predicated instructions.  */
#define MNCEF(mnem, op, nops, ops, enc) \
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)

/* Table entries that pass 0 as their encoder argument paste to "do_0";
   define it as 0 so those slots become null function pointers.  */
#define do_0 0
23697
23698static const struct asm_opcode insns[] =
23699{
23700#define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
23701#define THUMB_VARIANT & arm_ext_v4t
23702 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
23703 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
23704 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
23705 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
23706 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
23707 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
23708 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
23709 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
23710 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
23711 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
23712 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
23713 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
23714 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
23715 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
23716 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
23717 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
23718
23719 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
23720 for setting PSR flag bits. They are obsolete in V6 and do not
23721 have Thumb equivalents. */
23722 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
23723 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
23724 CL("tstp", 110f000, 2, (RR, SH), cmp),
23725 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
23726 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
23727 CL("cmpp", 150f000, 2, (RR, SH), cmp),
23728 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
23729 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
23730 CL("cmnp", 170f000, 2, (RR, SH), cmp),
23731
23732 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
23733 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
23734 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
23735 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
23736
23737 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
23738 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
23739 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
23740 OP_RRnpc),
23741 OP_ADDRGLDR),ldst, t_ldst),
23742 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
23743
23744 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23745 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23746 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23747 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23748 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23749 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23750
23751 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
23752 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
23753
23754 /* Pseudo ops. */
23755 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
23756 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
23757 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
23758 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
23759
23760 /* Thumb-compatibility pseudo ops. */
23761 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
23762 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
23763 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
23764 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
23765 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
23766 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
23767 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
23768 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
23769 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
23770 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
23771 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
23772 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
23773
23774 /* These may simplify to neg. */
23775 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
23776 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
23777
23778#undef THUMB_VARIANT
23779#define THUMB_VARIANT & arm_ext_os
23780
23781 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
23782 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
23783
23784#undef THUMB_VARIANT
23785#define THUMB_VARIANT & arm_ext_v6
23786
23787 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
23788
23789 /* V1 instructions with no Thumb analogue prior to V6T2. */
23790#undef THUMB_VARIANT
23791#define THUMB_VARIANT & arm_ext_v6t2
23792
23793 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
23794 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
23795 CL("teqp", 130f000, 2, (RR, SH), cmp),
23796
23797 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
23798 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
23799 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
23800 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
23801
23802 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23803 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23804
23805 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23806 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23807
23808 /* V1 instructions with no Thumb analogue at all. */
23809 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
23810 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
23811
23812 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
23813 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
23814 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
23815 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
23816 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
23817 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
23818 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
23819 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
23820
23821#undef ARM_VARIANT
23822#define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
23823#undef THUMB_VARIANT
23824#define THUMB_VARIANT & arm_ext_v4t
23825
23826 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
23827 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
23828
23829#undef THUMB_VARIANT
23830#define THUMB_VARIANT & arm_ext_v6t2
23831
23832 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
23833 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
23834
23835 /* Generic coprocessor instructions. */
23836 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
23837 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
23838 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
23839 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
23840 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
23841 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
23842 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
23843
23844#undef ARM_VARIANT
23845#define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
23846
23847 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
23848 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
23849
23850#undef ARM_VARIANT
23851#define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
23852#undef THUMB_VARIANT
23853#define THUMB_VARIANT & arm_ext_msr
23854
23855 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
23856 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
23857
23858#undef ARM_VARIANT
23859#define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
23860#undef THUMB_VARIANT
23861#define THUMB_VARIANT & arm_ext_v6t2
23862
23863 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
23864 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
23865 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
23866 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
23867 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
23868 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
23869 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
23870 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
23871
23872#undef ARM_VARIANT
23873#define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
23874#undef THUMB_VARIANT
23875#define THUMB_VARIANT & arm_ext_v4t
23876
23877 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
23878 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
23879 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
23880 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
23881 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
23882 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
23883
23884#undef ARM_VARIANT
23885#define ARM_VARIANT & arm_ext_v4t_5
23886
23887 /* ARM Architecture 4T. */
23888 /* Note: bx (and blx) are required on V5, even if the processor does
23889 not support Thumb. */
23890 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
23891
23892#undef ARM_VARIANT
23893#define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
23894#undef THUMB_VARIANT
23895#define THUMB_VARIANT & arm_ext_v5t
23896
23897 /* Note: blx has 2 variants; the .value coded here is for
23898 BLX(2). Only this variant has conditional execution. */
23899 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
23900 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
23901
23902#undef THUMB_VARIANT
23903#define THUMB_VARIANT & arm_ext_v6t2
23904
23905 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
23906 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
23907 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
23908 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
23909 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
23910 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
23911 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
23912 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
23913
23914#undef ARM_VARIANT
23915#define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
23916#undef THUMB_VARIANT
23917#define THUMB_VARIANT & arm_ext_v5exp
23918
23919 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
23920 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
23921 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
23922 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
23923
23924 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
23925 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
23926
23927 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
23928 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
23929 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
23930 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
23931
23932 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
23933 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
23934 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
23935 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
23936
23937 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
23938 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
23939
23940 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
23941 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
23942 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
23943 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
23944
23945#undef ARM_VARIANT
23946#define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
23947#undef THUMB_VARIANT
23948#define THUMB_VARIANT & arm_ext_v6t2
23949
23950 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
23951 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
23952 ldrd, t_ldstd),
23953 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
23954 ADDRGLDRS), ldrd, t_ldstd),
23955
23956 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
23957 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
23958
23959#undef ARM_VARIANT
23960#define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
23961
23962 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
23963
23964#undef ARM_VARIANT
23965#define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
23966#undef THUMB_VARIANT
23967#define THUMB_VARIANT & arm_ext_v6
23968
23969 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
23970 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
23971 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
23972 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
23973 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
23974 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
23975 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
23976 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
23977 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
23978 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
23979
23980#undef THUMB_VARIANT
23981#define THUMB_VARIANT & arm_ext_v6t2_v8m
23982
23983 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
23984 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
23985 strex, t_strex),
23986#undef THUMB_VARIANT
23987#define THUMB_VARIANT & arm_ext_v6t2
23988
23989 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
23990 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
23991
23992 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
23993 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
23994
23995/* ARM V6 not included in V7M. */
23996#undef THUMB_VARIANT
23997#define THUMB_VARIANT & arm_ext_v6_notm
23998 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
23999 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
24000 UF(rfeib, 9900a00, 1, (RRw), rfe),
24001 UF(rfeda, 8100a00, 1, (RRw), rfe),
24002 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
24003 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
24004 UF(rfefa, 8100a00, 1, (RRw), rfe),
24005 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
24006 UF(rfeed, 9900a00, 1, (RRw), rfe),
24007 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
24008 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
24009 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
24010 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
24011 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
24012 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
24013 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
24014 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
24015 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
24016 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
24017
24018/* ARM V6 not included in V7M (eg. integer SIMD). */
24019#undef THUMB_VARIANT
24020#define THUMB_VARIANT & arm_ext_v6_dsp
24021 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
24022 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
24023 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24024 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24025 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24026 /* Old name for QASX. */
24027 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24028 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24029 /* Old name for QSAX. */
24030 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24031 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24032 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24033 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24034 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24035 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24036 /* Old name for SASX. */
24037 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24038 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24039 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24040 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24041 /* Old name for SHASX. */
24042 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24043 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24044 /* Old name for SHSAX. */
24045 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24046 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24047 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24048 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24049 /* Old name for SSAX. */
24050 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24051 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24052 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24053 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24054 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24055 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24056 /* Old name for UASX. */
24057 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24058 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24059 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24060 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24061 /* Old name for UHASX. */
24062 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24063 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24064 /* Old name for UHSAX. */
24065 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24066 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24067 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24068 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24069 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24070 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24071 /* Old name for UQASX. */
24072 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24073 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24074 /* Old name for UQSAX. */
24075 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24076 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24077 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24078 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24079 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24080 /* Old name for USAX. */
24081 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24082 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24083 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24084 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24085 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24086 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24087 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24088 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24089 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24090 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24091 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24092 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24093 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24094 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24095 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24096 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24097 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24098 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24099 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24100 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24101 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24102 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24103 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24104 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24105 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24106 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24107 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24108 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24109 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24110 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
24111 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
24112 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24113 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24114 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
24115
24116#undef ARM_VARIANT
24117#define ARM_VARIANT & arm_ext_v6k_v6t2
24118#undef THUMB_VARIANT
24119#define THUMB_VARIANT & arm_ext_v6k_v6t2
24120
24121 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
24122 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
24123 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
24124 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
24125
24126#undef THUMB_VARIANT
24127#define THUMB_VARIANT & arm_ext_v6_notm
24128 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
24129 ldrexd, t_ldrexd),
24130 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
24131 RRnpcb), strexd, t_strexd),
24132
24133#undef THUMB_VARIANT
24134#define THUMB_VARIANT & arm_ext_v6t2_v8m
24135 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
24136 rd_rn, rd_rn),
24137 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
24138 rd_rn, rd_rn),
24139 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
24140 strex, t_strexbh),
24141 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
24142 strex, t_strexbh),
24143 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
24144
24145#undef ARM_VARIANT
24146#define ARM_VARIANT & arm_ext_sec
24147#undef THUMB_VARIANT
24148#define THUMB_VARIANT & arm_ext_sec
24149
24150 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
24151
24152#undef ARM_VARIANT
24153#define ARM_VARIANT & arm_ext_virt
24154#undef THUMB_VARIANT
24155#define THUMB_VARIANT & arm_ext_virt
24156
24157 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
24158 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
24159
24160#undef ARM_VARIANT
24161#define ARM_VARIANT & arm_ext_pan
24162#undef THUMB_VARIANT
24163#define THUMB_VARIANT & arm_ext_pan
24164
24165 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
24166
24167#undef ARM_VARIANT
24168#define ARM_VARIANT & arm_ext_v6t2
24169#undef THUMB_VARIANT
24170#define THUMB_VARIANT & arm_ext_v6t2
24171
24172 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
24173 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
24174 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
24175 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
24176
24177 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
24178 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
24179
24180 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24181 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24182 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24183 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24184
24185#undef ARM_VARIANT
24186#define ARM_VARIANT & arm_ext_v3
24187#undef THUMB_VARIANT
24188#define THUMB_VARIANT & arm_ext_v6t2
24189
24190 TUE("csdb", 320f014, f3af8014, 0, (), noargs, t_csdb),
24191 TUF("ssbb", 57ff040, f3bf8f40, 0, (), noargs, t_csdb),
24192 TUF("pssbb", 57ff044, f3bf8f44, 0, (), noargs, t_csdb),
24193
24194#undef ARM_VARIANT
24195#define ARM_VARIANT & arm_ext_v6t2
24196#undef THUMB_VARIANT
24197#define THUMB_VARIANT & arm_ext_v6t2_v8m
24198 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
24199 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
24200
 /* Thumb-only instructions.  */
24202#undef ARM_VARIANT
24203#define ARM_VARIANT NULL
24204 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
24205 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
24206
24207 /* ARM does not really have an IT instruction, so always allow it.
24208 The opcode is copied from Thumb in order to allow warnings in
24209 -mimplicit-it=[never | arm] modes.  */
24210#undef ARM_VARIANT
24211#define ARM_VARIANT & arm_ext_v1
24212#undef THUMB_VARIANT
24213#define THUMB_VARIANT & arm_ext_v6t2
24214
 /* The 15 IT variants encode the then/else pattern directly in the
    low bits of the base opcode 0xbf00.  */
24215 TUE("it", bf08, bf08, 1, (COND), it, t_it),
24216 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
24217 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
24218 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
24219 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
24220 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
24221 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
24222 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
24223 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
24224 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
24225 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
24226 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
24227 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
24228 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
24229 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
24230 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent.  */
24231 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
24232 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
24233
24234 /* Thumb2 only instructions.  */
24235#undef ARM_VARIANT
24236#define ARM_VARIANT NULL
24237
24238 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
24239 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
24240 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
24241 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
24242 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
24243 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
24244
24245 /* Hardware division instructions.  */
24246#undef ARM_VARIANT
24247#define ARM_VARIANT & arm_ext_adiv
24248#undef THUMB_VARIANT
24249#define THUMB_VARIANT & arm_ext_div
24250
24251 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
24252 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
24253
 /* ARM V6M/V7 instructions.  */
24255#undef ARM_VARIANT
24256#define ARM_VARIANT & arm_ext_barrier
24257#undef THUMB_VARIANT
24258#define THUMB_VARIANT & arm_ext_barrier
24259
24260 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
24261 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
24262 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
24263
24264 /* ARM V7 instructions.  */
24265#undef ARM_VARIANT
24266#define ARM_VARIANT & arm_ext_v7
24267#undef THUMB_VARIANT
24268#define THUMB_VARIANT & arm_ext_v7
24269
24270 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
24271 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
24272
24273#undef ARM_VARIANT
24274#define ARM_VARIANT & arm_ext_mp
24275#undef THUMB_VARIANT
24276#define THUMB_VARIANT & arm_ext_mp
24277
24278 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
24279
24280 /* ARMv8 instructions.  */
24281#undef ARM_VARIANT
24282#define ARM_VARIANT & arm_ext_v8
24283
24284/* Instructions shared between armv8-a and armv8-m.  */
24285#undef THUMB_VARIANT
24286#define THUMB_VARIANT & arm_ext_atomics
24287
 /* Load-acquire / store-release and their exclusive variants.  */
24288 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
24289 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
24290 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
24291 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
24292 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
24293 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
24294 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
24295 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
24296 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
24297 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
24298 stlex, t_stlex),
24299 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
24300 stlex, t_stlex),
24301 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
24302 stlex, t_stlex),
24303#undef THUMB_VARIANT
24304#define THUMB_VARIANT & arm_ext_v8
24305
24306 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
24307 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
24308 ldrexd, t_ldrexd),
24309 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
24310 strexd, t_strexd),
24311
/* Defined in V8 but is in undefined encoding space for earlier
24313 architectures.  However earlier architectures are required to treat
24314 this instruction as a semihosting trap as well.  Hence while not explicitly
24315 defined as such, it is in fact correct to define the instruction for all
24316 architectures.  */
24317#undef THUMB_VARIANT
24318#define THUMB_VARIANT & arm_ext_v1
24319#undef ARM_VARIANT
24320#define ARM_VARIANT & arm_ext_v1
24321 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
24322
24323 /* ARMv8 T32 only.  */
24324#undef ARM_VARIANT
24325#define ARM_VARIANT NULL
24326 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
24327 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
24328 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
24329
 /* FP for ARMv8.  */
24331#undef ARM_VARIANT
24332#define ARM_VARIANT & fpu_vfp_ext_armv8xd
24333#undef THUMB_VARIANT
24334#define THUMB_VARIANT & fpu_vfp_ext_armv8xd
24335
 /* Conditional-select and round-to-integral FP operations.  */
24336 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
24337 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
24338 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
24339 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
24340 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
24341 mnCE(vrintz, _vrintr, 2, (RNSDQMQ, oRNSDQMQ), vrintz),
24342 mnCE(vrintx, _vrintr, 2, (RNSDQMQ, oRNSDQMQ), vrintx),
24343 mnUF(vrinta, _vrinta, 2, (RNSDQMQ, oRNSDQMQ), vrinta),
24344 mnUF(vrintn, _vrinta, 2, (RNSDQMQ, oRNSDQMQ), vrintn),
24345 mnUF(vrintp, _vrinta, 2, (RNSDQMQ, oRNSDQMQ), vrintp),
24346 mnUF(vrintm, _vrinta, 2, (RNSDQMQ, oRNSDQMQ), vrintm),
24347
24348 /* Crypto v1 extensions.  */
24349#undef ARM_VARIANT
24350#define ARM_VARIANT & fpu_crypto_ext_armv8
24351#undef THUMB_VARIANT
24352#define THUMB_VARIANT & fpu_crypto_ext_armv8
24353
24354 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
24355 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
24356 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
24357 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
24358 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
24359 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
24360 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
24361 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
24362 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
24363 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
24364 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
24365 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
24366 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
24367 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
24368
 /* CRC32 extension.  */
24369#undef ARM_VARIANT
24370#define ARM_VARIANT & crc_ext_armv8
24371#undef THUMB_VARIANT
24372#define THUMB_VARIANT & crc_ext_armv8
24373 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
24374 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
24375 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
24376 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
24377 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
24378 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
24379
24380 /* ARMv8.2 RAS extension.  */
24381#undef ARM_VARIANT
24382#define ARM_VARIANT & arm_ext_ras
24383#undef THUMB_VARIANT
24384#define THUMB_VARIANT & arm_ext_ras
24385 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
24386
 /* ARMv8.3 javascript conversion.  */
24387#undef ARM_VARIANT
24388#define ARM_VARIANT & arm_ext_v8_3
24389#undef THUMB_VARIANT
24390#define THUMB_VARIANT & arm_ext_v8_3
24391 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
24392
 /* Dot product extension.  */
24393#undef ARM_VARIANT
24394#define ARM_VARIANT & fpu_neon_ext_dotprod
24395#undef THUMB_VARIANT
24396#define THUMB_VARIANT & fpu_neon_ext_dotprod
24397 NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
24398 NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
24399
/* FPA (legacy floating-point accelerator) V1.  Mnemonic suffixes follow
   a fixed scheme: precision s/d/e (single/double/extended) optionally
   followed by rounding p/m/z (plus/minus/zero infinity); no rounding
   letter means round-to-nearest.  Opcode bits: +0x080000 selects the
   next precision, +0x20/0x40/0x60 select the rounding mode.  */
24400#undef ARM_VARIANT
24401#define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1).  */
24402#undef THUMB_VARIANT
24403#define THUMB_VARIANT NULL
24404
24405 cCE("wfs", e200110, 1, (RR), rd),
24406 cCE("rfs", e300110, 1, (RR), rd),
24407 cCE("wfc", e400110, 1, (RR), rd),
24408 cCE("rfc", e500110, 1, (RR), rd),
24409
24410 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
24411 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
24412 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
24413 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
24414
24415 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
24416 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
24417 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
24418 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
24419
24420 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
24421 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
24422 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
24423 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
24424 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
24425 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
24426 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
24427 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
24428 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
24429 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
24430 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
24431 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
24432
24433 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
24434 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
24435 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
24436 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
24437 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
24438 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
24439 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
24440 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
24441 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
24442 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
24443 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
24444 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
24445
24446 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
24447 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
24448 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
24449 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
24450 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
24451 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
24452 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
24453 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
24454 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
24455 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
24456 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
24457 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
24458
24459 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
24460 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
24461 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
24462 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
24463 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
24464 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
24465 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
24466 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
24467 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
24468 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
24469 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
24470 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
24471
24472 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
24473 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
24474 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
24475 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
24476 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
24477 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
24478 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
24479 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
24480 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
24481 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
24482 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
24483 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
24484
24485 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
24486 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
24487 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
24488 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
24489 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
24490 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
24491 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
24492 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
24493 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
24494 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
24495 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
24496 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
24497
24498 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
24499 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
24500 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
24501 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
24502 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
24503 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
24504 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
24505 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
24506 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
24507 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
24508 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
24509 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
24510
24511 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
24512 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
24513 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
24514 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
24515 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
24516 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
24517 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
24518 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
24519 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
24520 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
24521 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
24522 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
24523
 /* FPA trigonometric, unordered-round and normalise monadic families;
    same precision/rounding suffix scheme as above.  */
24524 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
24525 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
24526 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
24527 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
24528 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
24529 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
24530 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
24531 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
24532 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
24533 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
24534 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
24535 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
24536
24537 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
24538 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
24539 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
24540 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
24541 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
24542 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
24543 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
24544 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
24545 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
24546 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
24547 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
24548 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
24549
24550 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
24551 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
24552 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
24553 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
24554 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
24555 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
24556 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
24557 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
24558 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
24559 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
24560 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
24561 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
24562
24563 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
24564 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
24565 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
24566 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
24567 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
24568 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
24569 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
24570 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
24571 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
24572 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
24573 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
24574 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
24575
24576 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
24577 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
24578 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
24579 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
24580 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
24581 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
24582 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
24583 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
24584 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
24585 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
24586 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
24587 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
24588
24589 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
24590 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
24591 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
24592 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
24593 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
24594 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
24595 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
24596 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
24597 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
24598 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
24599 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
24600 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
24601
24602 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
24603 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
24604 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
24605 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
24606 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
24607 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
24608 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
24609 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
24610 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
24611 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
24612 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
24613 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
24614
24615 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
24616 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
24617 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
24618 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
24619 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
24620 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
24621 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
24622 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
24623 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
24624 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
24625 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
24626 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
24627
 /* FPA dyadic (three-operand) families: add, subtract, reverse
    subtract, multiply, divide, reverse divide, power, reverse power,
    remainder, fast multiply, fast divide, fast reverse divide and
    polar angle.  Same precision/rounding suffix scheme as the monadic
    families above.  */
24628 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
24629 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
24630 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
24631 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
24632 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
24633 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24634 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24635 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24636 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
24637 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
24638 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
24639 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
24640
24641 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
24642 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
24643 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
24644 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
24645 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
24646 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24647 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24648 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24649 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
24650 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
24651 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
24652 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
24653
24654 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
24655 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
24656 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
24657 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
24658 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
24659 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24660 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24661 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24662 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
24663 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
24664 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
24665 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
24666
24667 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
24668 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
24669 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
24670 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
24671 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
24672 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24673 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24674 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24675 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
24676 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
24677 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
24678 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
24679
24680 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
24681 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
24682 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
24683 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
24684 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
24685 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24686 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24687 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24688 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
24689 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
24690 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
24691 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
24692
24693 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
24694 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
24695 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
24696 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
24697 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
24698 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24699 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24700 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24701 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
24702 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
24703 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
24704 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
24705
24706 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
24707 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
24708 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
24709 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
24710 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
24711 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24712 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24713 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24714 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
24715 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
24716 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
24717 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
24718
24719 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
24720 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
24721 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
24722 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
24723 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
24724 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24725 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24726 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24727 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
24728 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
24729 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
24730 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
24731
24732 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
24733 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
24734 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
24735 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
24736 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
24737 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24738 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24739 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24740 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
24741 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
24742 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
24743 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
24744
24745 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
24746 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
24747 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
24748 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
24749 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
24750 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24751 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24752 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24753 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
24754 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
24755 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
24756 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
24757
24758 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
24759 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
24760 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
24761 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
24762 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
24763 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24764 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24765 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24766 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
24767 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
24768 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
24769 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
24770
24771 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
24772 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
24773 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
24774 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
24775 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
24776 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24777 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24778 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24779 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
24780 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
24781 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
24782 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
24783
24784 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
24785 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
24786 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
24787 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
24788 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
24789 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24790 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24791 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24792 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
24793 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
24794 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
24795 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
24796
 /* FPA compares (cmf/cnf and the exception-raising cmfe/cnfe),
    integer-to-float conversions (flt), and float-to-integer (fix).  */
24797 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
24798 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
24799 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
24800 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
24801
24802 cCL("flts", e000110, 2, (RF, RR), rn_rd),
24803 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
24804 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
24805 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
24806 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
24807 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
24808 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
24809 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
24810 cCL("flte", e080110, 2, (RF, RR), rn_rd),
24811 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
24812 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
24813 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
24814
24815 /* The implementation of the FIX instruction is broken on some
24816 assemblers, in that it accepts a precision specifier as well as a
24817 rounding specifier, despite the fact that this is meaningless.
24818 To be more compatible, we accept it as well, though of course it
24819 does not set any bits.  */
24820 cCE("fix", e100110, 2, (RR, RF), rd_rm),
24821 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
24822 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
24823 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
24824 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
24825 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
24826 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
24827 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
24828 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
24829 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
24830 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
24831 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
24832 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
24833
 /* Instructions that were new with the real FPA, call them V2.  */
24835#undef ARM_VARIANT
24836#define ARM_VARIANT & fpu_fpa_ext_v2
24837
 /* Multiple-register load/store (lfm/sfm) with FD/EA addressing
    aliases.  */
24838 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
24839 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
24840 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
24841 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
24842 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
24843 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
24844
/* VFP V1xD (single precision) section: status-register moves, type
   conversions, single-precision memory/monadic/dyadic/compare
   operations, plus the double-precision load/store forms that remain
   available on single-precision-only implementations.  */
24845#undef ARM_VARIANT
24846#define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision).  */
24847#undef THUMB_VARIANT
24848#define THUMB_VARIANT & arm_ext_v6t2
24849 mcCE(vmrs, ef00a10, 2, (APSR_RR, RVC), vmrs),
24850 mcCE(vmsr, ee00a10, 2, (RVC, RR), vmsr),
24851#undef THUMB_VARIANT
24852
24853 /* Moves and type conversions.  */
24854 cCE("fmstat", ef1fa10, 0, (), noargs),
24855 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
24856 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
24857 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
24858 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
24859 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
24860 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
24861 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
24862 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
24863
24864 /* Memory operations.  */
24865 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
24866 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
24867 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
24868 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
24869 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
24870 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
24871 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
24872 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
24873 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
24874 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
24875 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
24876 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
24877 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
24878 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
24879 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
24880 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
24881 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
24882 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
24883
24884 /* Monadic operations.  */
24885 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
24886 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
24887 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
24888
24889 /* Dyadic operations.  */
24890 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
24891 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
24892 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
24893 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
24894 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
24895 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
24896 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
24897 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
24898 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
24899
24900 /* Comparisons.  */
24901 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
24902 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
24903 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
24904 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
24905
24906 /* Double precision load/store are still present on single precision
24907 implementations.  */
24908 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
24909 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
24910 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
24911 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
24912 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
24913 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
24914 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
24915 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
24916 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
24917 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
24918
24919#undef ARM_VARIANT
24920#define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
24921
24922 /* Moves and type conversions. */
24923 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
24924 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
24925 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
24926 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
24927 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
24928 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
24929 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
24930 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
24931 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
24932 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
24933 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
24934 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
24935
24936 /* Monadic operations. */
24937 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
24938 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
24939 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
24940
24941 /* Dyadic operations. */
24942 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
24943 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
24944 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
24945 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
24946 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
24947 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
24948 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
24949 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
24950 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
24951
24952 /* Comparisons. */
24953 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
24954 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
24955 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
24956 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
24957
24958/* Instructions which may belong to either the Neon or VFP instruction sets.
24959 Individual encoder functions perform additional architecture checks. */
24960#undef ARM_VARIANT
24961#define ARM_VARIANT & fpu_vfp_ext_v1xd
24962#undef THUMB_VARIANT
24963#define THUMB_VARIANT & fpu_vfp_ext_v1xd
24964
24965 /* These mnemonics are unique to VFP. */
24966 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
24967 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
24968 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
24969 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
24970 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
24971 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
24972 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
24973 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
24974
24975 /* Mnemonics shared by Neon and VFP. */
24976 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
24977
24978 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
24979 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
24980 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
24981 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
24982 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
24983 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
24984
24985 mnCEF(vcvt, _vcvt, 3, (RNSDQMQ, RNSDQMQ, oI32z), neon_cvt),
24986 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
24987 MNCEF(vcvtb, eb20a40, 3, (RVSDMQ, RVSDMQ, oI32b), neon_cvtb),
24988 MNCEF(vcvtt, eb20a40, 3, (RVSDMQ, RVSDMQ, oI32b), neon_cvtt),
24989
24990
24991 /* NOTE: All VMOV encoding is special-cased! */
24992 NCE(vmovq, 0, 1, (VMOV), neon_mov),
24993
24994#undef THUMB_VARIANT
24995/* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
24996 by different feature bits. Since we are setting the Thumb guard, we can
24997 require Thumb-1 which makes it a nop guard and set the right feature bit in
24998 do_vldr_vstr (). */
24999#define THUMB_VARIANT & arm_ext_v4t
25000 NCE(vldr, d100b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
25001 NCE(vstr, d000b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
25002
25003#undef ARM_VARIANT
25004#define ARM_VARIANT & arm_ext_fp16
25005#undef THUMB_VARIANT
25006#define THUMB_VARIANT & arm_ext_fp16
25007 /* New instructions added from v8.2, allowing the extraction and insertion of
25008 the upper 16 bits of a 32-bit vector register. */
25009 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
25010 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
25011
25012 /* New backported fma/fms instructions optional in v8.2. */
25013 NUF (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
25014 NUF (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
25015
25016#undef THUMB_VARIANT
25017#define THUMB_VARIANT & fpu_neon_ext_v1
25018#undef ARM_VARIANT
25019#define ARM_VARIANT & fpu_neon_ext_v1
25020
25021 /* Data processing with three registers of the same length. */
 25022 /* Integer ops, valid types S8 S16 S32 U8 U16 U32. */
25023 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
25024 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
25025 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
25026 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
25027 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
 25028 /* Integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
25029 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
25030 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
25031 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
25032 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
25033 /* If not immediate, fall back to neon_dyadic_i64_su.
25034 shl should accept I8 I16 I32 I64,
25035 qshl should accept S8 S16 S32 S64 U8 U16 U32 U64. */
25036 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl),
25037 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl),
25038 /* Logic ops, types optional & ignored. */
25039 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
25040 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
25041 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
25042 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
25043 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
25044 /* Bitfield ops, untyped. */
25045 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25046 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
25047 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25048 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
25049 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25050 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
25051 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
25052 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
25053 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
25054 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
25055 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
25056 back to neon_dyadic_if_su. */
25057 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
25058 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
25059 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
25060 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
25061 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
25062 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
25063 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
25064 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
 25065 /* Comparison. Types I8 I16 I32 F32. */
25066 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
25067 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
25068 /* As above, D registers only. */
25069 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
25070 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
25071 /* Int and float variants, signedness unimportant. */
25072 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
25073 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
25074 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
25075 /* Add/sub take types I8 I16 I32 I64 F32. */
25076 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
25077 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
25078 /* vtst takes sizes 8, 16, 32. */
25079 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
25080 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
25081 /* VMUL takes I8 I16 I32 F32 P8. */
25082 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
25083 /* VQD{R}MULH takes S16 S32. */
25084 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
25085 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
25086 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
25087 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
25088 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
25089 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
25090 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
25091 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
25092 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
25093 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
25094 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
25095 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
25096 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
25097 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
25098 /* ARM v8.1 extension. */
25099 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
25100 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
25101 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
25102
25103 /* Two address, int/float. Types S8 S16 S32 F32. */
25104 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
25105 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
25106
25107 /* Data processing with two registers and a shift amount. */
25108 /* Right shifts, and variants with rounding.
25109 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
25110 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
25111 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
25112 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
25113 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
25114 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
25115 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
25116 /* Shift and insert. Sizes accepted 8 16 32 64. */
25117 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
25118 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
25119 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
25120 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
25121 /* Right shift immediate, saturating & narrowing, with rounding variants.
25122 Types accepted S16 S32 S64 U16 U32 U64. */
25123 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
25124 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
25125 /* As above, unsigned. Types accepted S16 S32 S64. */
25126 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
25127 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
25128 /* Right shift narrowing. Types accepted I16 I32 I64. */
25129 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
25130 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
25131 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
25132 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
25133 /* CVT with optional immediate for fixed-point variant. */
25134 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
25135
25136 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
25137
25138 /* Data processing, three registers of different lengths. */
25139 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
25140 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
25141 /* If not scalar, fall back to neon_dyadic_long.
25142 Vector types as above, scalar types S16 S32 U16 U32. */
25143 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
25144 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
25145 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
25146 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
25147 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
25148 /* Dyadic, narrowing insns. Types I16 I32 I64. */
25149 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
25150 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
25151 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
25152 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
25153 /* Saturating doubling multiplies. Types S16 S32. */
25154 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25155 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25156 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25157 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
25158 S16 S32 U16 U32. */
25159 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
25160
25161 /* Extract. Size 8. */
25162 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
25163 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
25164
25165 /* Two registers, miscellaneous. */
25166 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
25167 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
25168 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
25169 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
25170 /* Vector replicate. Sizes 8 16 32. */
25171 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
25172 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
25173 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
25174 /* VMOVN. Types I16 I32 I64. */
25175 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
25176 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
25177 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
25178 /* VQMOVUN. Types S16 S32 S64. */
25179 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
25180 /* VZIP / VUZP. Sizes 8 16 32. */
25181 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
25182 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
25183 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
25184 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
25185 /* VQABS / VQNEG. Types S8 S16 S32. */
25186 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
25187 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
25188 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
25189 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
25190 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
25191 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
25192 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
25193 /* Reciprocal estimates. Types U32 F16 F32. */
25194 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
25195 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
25196 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
25197 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
25198 /* VCLS. Types S8 S16 S32. */
25199 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
25200 /* VCLZ. Types I8 I16 I32. */
25201 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
25202 /* VCNT. Size 8. */
25203 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
25204 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
25205 /* Two address, untyped. */
25206 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
25207 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
25208 /* VTRN. Sizes 8 16 32. */
25209 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
25210 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
25211
25212 /* Table lookup. Size 8. */
25213 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
25214 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
25215
25216#undef THUMB_VARIANT
25217#define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
25218#undef ARM_VARIANT
25219#define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
25220
25221 /* Neon element/structure load/store. */
25222 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
25223 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
25224 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
25225 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
25226 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
25227 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
25228 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
25229 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
25230
25231#undef THUMB_VARIANT
25232#define THUMB_VARIANT & fpu_vfp_ext_v3xd
25233#undef ARM_VARIANT
25234#define ARM_VARIANT & fpu_vfp_ext_v3xd
25235 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
25236 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
25237 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
25238 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
25239 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
25240 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
25241 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
25242 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
25243 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
25244
25245#undef THUMB_VARIANT
25246#define THUMB_VARIANT & fpu_vfp_ext_v3
25247#undef ARM_VARIANT
25248#define ARM_VARIANT & fpu_vfp_ext_v3
25249
25250 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
25251 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
25252 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
25253 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
25254 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
25255 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
25256 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
25257 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
25258 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
25259
25260#undef ARM_VARIANT
25261#define ARM_VARIANT & fpu_vfp_ext_fma
25262#undef THUMB_VARIANT
25263#define THUMB_VARIANT & fpu_vfp_ext_fma
25264 /* Mnemonics shared by Neon, VFP, MVE and BF16. These are included in the
25265 VFP FMA variant; NEON and VFP FMA always includes the NEON
25266 FMA instructions. */
25267 mnCEF(vfma, _vfma, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_fmac),
25268 TUF ("vfmat", c300850, fc300850, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), mve_vfma, mve_vfma),
25269 mnCEF(vfms, _vfms, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ), neon_fmac),
25270
 25271 /* ffmas/ffnmas/ffmad/ffnmad are dummy mnemonics to satisfy gas;
25272 the v form should always be used. */
25273 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25274 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25275 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25276 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25277 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25278 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25279
25280#undef THUMB_VARIANT
25281#undef ARM_VARIANT
25282#define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
25283
25284 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25285 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25286 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25287 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25288 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25289 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25290 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
25291 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
25292
25293#undef ARM_VARIANT
25294#define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
25295
25296 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
25297 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
25298 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
25299 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
25300 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
25301 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
25302 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
25303 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
25304 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
25305 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
25306 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
25307 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
25308 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
25309 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
25310 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
25311 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
25312 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
25313 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
25314 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
25315 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
25316 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
25317 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
25318 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
25319 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
25320 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
25321 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
25322 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
25323 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
25324 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
25325 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
25326 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
25327 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
25328 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
25329 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
25330 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
25331 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
25332 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
25333 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25334 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25335 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25336 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25337 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25338 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25339 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25340 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25341 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25342 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
25343 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25344 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25345 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25346 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25347 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25348 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25349 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25350 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25351 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25352 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25353 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25354 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25355 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25356 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25357 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25358 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25359 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25360 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25361 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25362 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
25363 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
25364 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
25365 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
25366 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25367 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25368 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25369 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25370 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25371 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25372 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25373 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25374 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25375 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25376 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25377 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25378 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25379 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25380 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25381 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25382 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25383 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25384 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
25385 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25386 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25387 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25388 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25389 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25390 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25391 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25392 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25393 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25394 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25395 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25396 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25397 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25398 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25399 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25400 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25401 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25402 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25403 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25404 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25405 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25406 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
25407 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25408 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25409 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25410 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25411 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25412 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25413 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25414 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25415 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25416 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25417 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25418 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25419 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25420 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25421 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25422 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25423 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25424 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25425 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
25426 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
25427 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
25428 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
25429 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25430 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25431 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25432 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25433 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25434 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25435 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25436 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25437 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25438 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
25439 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
25440 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
25441 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
25442 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
25443 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
25444 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25445 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25446 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25447 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
25448 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
25449 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
25450 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
25451 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
25452 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
25453 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25454 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25455 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25456 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25457 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
25458
25459#undef ARM_VARIANT
25460#define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
25461
25462 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
25463 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
25464 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
25465 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
25466 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
25467 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
25468 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25469 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25470 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25471 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25472 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25473 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25474 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25475 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25476 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25477 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25478 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25479 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25480 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25481 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25482 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
25483 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25484 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25485 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25486 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25487 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25488 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25489 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25490 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25491 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25492 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25493 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25494 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25495 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25496 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25497 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25498 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25499 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25500 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25501 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25502 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25503 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25504 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25505 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25506 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25507 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25508 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25509 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25510 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25511 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25512 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25513 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25514 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25515 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25516 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25517 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25518 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25519
25520#undef ARM_VARIANT
25521#define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
25522
25523 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
25524 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
25525 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
25526 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
25527 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
25528 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
25529 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
25530 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
25531 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
25532 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
25533 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
25534 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
25535 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
25536 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
25537 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
25538 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
25539 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
25540 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
25541 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
25542 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
25543 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
25544 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
25545 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
25546 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
25547 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
25548 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
25549 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
25550 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
25551 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
25552 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
25553 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
25554 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
25555 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
25556 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
25557 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
25558 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
25559 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
25560 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
25561 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
25562 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
25563 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
25564 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
25565 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
25566 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
25567 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
25568 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
25569 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
25570 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
25571 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
25572 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
25573 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
25574 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
25575 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
25576 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
25577 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
25578 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
25579 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
25580 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
25581 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
25582 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
25583 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
25584 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
25585 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
25586 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
25587 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
25588 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
25589 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
25590 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
25591 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
25592 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
25593 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
25594 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
25595 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
25596 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
25597 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
25598 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
25599
25600 /* ARMv8.5-A instructions. */
25601#undef ARM_VARIANT
25602#define ARM_VARIANT & arm_ext_sb
25603#undef THUMB_VARIANT
25604#define THUMB_VARIANT & arm_ext_sb
25605 TUF("sb", 57ff070, f3bf8f70, 0, (), noargs, noargs),
25606
25607#undef ARM_VARIANT
25608#define ARM_VARIANT & arm_ext_predres
25609#undef THUMB_VARIANT
25610#define THUMB_VARIANT & arm_ext_predres
25611 CE("cfprctx", e070f93, 1, (RRnpc), rd),
25612 CE("dvprctx", e070fb3, 1, (RRnpc), rd),
25613 CE("cpprctx", e070ff3, 1, (RRnpc), rd),
25614
25615 /* ARMv8-M instructions. */
25616#undef ARM_VARIANT
25617#define ARM_VARIANT NULL
25618#undef THUMB_VARIANT
25619#define THUMB_VARIANT & arm_ext_v8m
25620 ToU("sg", e97fe97f, 0, (), noargs),
25621 ToC("blxns", 4784, 1, (RRnpc), t_blx),
25622 ToC("bxns", 4704, 1, (RRnpc), t_bx),
25623 ToC("tt", e840f000, 2, (RRnpc, RRnpc), tt),
25624 ToC("ttt", e840f040, 2, (RRnpc, RRnpc), tt),
25625 ToC("tta", e840f080, 2, (RRnpc, RRnpc), tt),
25626 ToC("ttat", e840f0c0, 2, (RRnpc, RRnpc), tt),
25627
25628 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
25629 instructions behave as nop if no VFP is present. */
25630#undef THUMB_VARIANT
25631#define THUMB_VARIANT & arm_ext_v8m_main
25632 ToC("vlldm", ec300a00, 1, (RRnpc), rn),
25633 ToC("vlstm", ec200a00, 1, (RRnpc), rn),
25634
25635 /* Armv8.1-M Mainline instructions. */
25636#undef THUMB_VARIANT
25637#define THUMB_VARIANT & arm_ext_v8_1m_main
25638 toU("cinc", _cinc, 3, (RRnpcsp, RR_ZR, COND), t_cond),
25639 toU("cinv", _cinv, 3, (RRnpcsp, RR_ZR, COND), t_cond),
25640 toU("cneg", _cneg, 3, (RRnpcsp, RR_ZR, COND), t_cond),
25641 toU("csel", _csel, 4, (RRnpcsp, RR_ZR, RR_ZR, COND), t_cond),
25642 toU("csetm", _csetm, 2, (RRnpcsp, COND), t_cond),
25643 toU("cset", _cset, 2, (RRnpcsp, COND), t_cond),
25644 toU("csinc", _csinc, 4, (RRnpcsp, RR_ZR, RR_ZR, COND), t_cond),
25645 toU("csinv", _csinv, 4, (RRnpcsp, RR_ZR, RR_ZR, COND), t_cond),
25646 toU("csneg", _csneg, 4, (RRnpcsp, RR_ZR, RR_ZR, COND), t_cond),
25647
25648 toC("bf", _bf, 2, (EXPs, EXPs), t_branch_future),
25649 toU("bfcsel", _bfcsel, 4, (EXPs, EXPs, EXPs, COND), t_branch_future),
25650 toC("bfx", _bfx, 2, (EXPs, RRnpcsp), t_branch_future),
25651 toC("bfl", _bfl, 2, (EXPs, EXPs), t_branch_future),
25652 toC("bflx", _bflx, 2, (EXPs, RRnpcsp), t_branch_future),
25653
25654 toU("dls", _dls, 2, (LR, RRnpcsp), t_loloop),
25655 toU("wls", _wls, 3, (LR, RRnpcsp, EXP), t_loloop),
25656 toU("le", _le, 2, (oLR, EXP), t_loloop),
25657
25658 ToC("clrm", e89f0000, 1, (CLRMLST), t_clrm),
25659 ToC("vscclrm", ec9f0a00, 1, (VRSDVLST), t_vscclrm),
25660
25661#undef THUMB_VARIANT
25662#define THUMB_VARIANT & mve_ext
25663 ToC("lsll", ea50010d, 3, (RRe, RRo, RRnpcsp_I32), mve_scalar_shift),
25664 ToC("lsrl", ea50011f, 3, (RRe, RRo, I32), mve_scalar_shift),
25665 ToC("asrl", ea50012d, 3, (RRe, RRo, RRnpcsp_I32), mve_scalar_shift),
25666 ToC("uqrshll", ea51010d, 4, (RRe, RRo, I48_I64, RRnpcsp), mve_scalar_shift1),
25667 ToC("sqrshrl", ea51012d, 4, (RRe, RRo, I48_I64, RRnpcsp), mve_scalar_shift1),
25668 ToC("uqshll", ea51010f, 3, (RRe, RRo, I32), mve_scalar_shift),
25669 ToC("urshrl", ea51011f, 3, (RRe, RRo, I32), mve_scalar_shift),
25670 ToC("srshrl", ea51012f, 3, (RRe, RRo, I32), mve_scalar_shift),
25671 ToC("sqshll", ea51013f, 3, (RRe, RRo, I32), mve_scalar_shift),
25672 ToC("uqrshl", ea500f0d, 2, (RRnpcsp, RRnpcsp), mve_scalar_shift),
25673 ToC("sqrshr", ea500f2d, 2, (RRnpcsp, RRnpcsp), mve_scalar_shift),
25674 ToC("uqshl", ea500f0f, 2, (RRnpcsp, I32), mve_scalar_shift),
25675 ToC("urshr", ea500f1f, 2, (RRnpcsp, I32), mve_scalar_shift),
25676 ToC("srshr", ea500f2f, 2, (RRnpcsp, I32), mve_scalar_shift),
25677 ToC("sqshl", ea500f3f, 2, (RRnpcsp, I32), mve_scalar_shift),
25678
25679 ToC("vpt", ee410f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25680 ToC("vptt", ee018f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25681 ToC("vpte", ee418f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25682 ToC("vpttt", ee014f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25683 ToC("vptte", ee01cf00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25684 ToC("vptet", ee41cf00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25685 ToC("vptee", ee414f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25686 ToC("vptttt", ee012f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25687 ToC("vpttte", ee016f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25688 ToC("vpttet", ee01ef00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25689 ToC("vpttee", ee01af00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25690 ToC("vptett", ee41af00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25691 ToC("vptete", ee41ef00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25692 ToC("vpteet", ee416f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25693 ToC("vpteee", ee412f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25694
25695 ToC("vpst", fe710f4d, 0, (), mve_vpt),
25696 ToC("vpstt", fe318f4d, 0, (), mve_vpt),
25697 ToC("vpste", fe718f4d, 0, (), mve_vpt),
25698 ToC("vpsttt", fe314f4d, 0, (), mve_vpt),
25699 ToC("vpstte", fe31cf4d, 0, (), mve_vpt),
25700 ToC("vpstet", fe71cf4d, 0, (), mve_vpt),
25701 ToC("vpstee", fe714f4d, 0, (), mve_vpt),
25702 ToC("vpstttt", fe312f4d, 0, (), mve_vpt),
25703 ToC("vpsttte", fe316f4d, 0, (), mve_vpt),
25704 ToC("vpsttet", fe31ef4d, 0, (), mve_vpt),
25705 ToC("vpsttee", fe31af4d, 0, (), mve_vpt),
25706 ToC("vpstett", fe71af4d, 0, (), mve_vpt),
25707 ToC("vpstete", fe71ef4d, 0, (), mve_vpt),
25708 ToC("vpsteet", fe716f4d, 0, (), mve_vpt),
25709 ToC("vpsteee", fe712f4d, 0, (), mve_vpt),
25710
25711 /* MVE and MVE FP only. */
25712 mToC("vhcadd", ee000f00, 4, (RMQ, RMQ, RMQ, EXPi), mve_vhcadd),
25713 mCEF(vctp, _vctp, 1, (RRnpc), mve_vctp),
25714 mCEF(vadc, _vadc, 3, (RMQ, RMQ, RMQ), mve_vadc),
25715 mCEF(vadci, _vadci, 3, (RMQ, RMQ, RMQ), mve_vadc),
25716 mToC("vsbc", fe300f00, 3, (RMQ, RMQ, RMQ), mve_vsbc),
25717 mToC("vsbci", fe301f00, 3, (RMQ, RMQ, RMQ), mve_vsbc),
25718 mCEF(vmullb, _vmullb, 3, (RMQ, RMQ, RMQ), mve_vmull),
25719 mCEF(vabav, _vabav, 3, (RRnpcsp, RMQ, RMQ), mve_vabav),
25720 mCEF(vmladav, _vmladav, 3, (RRe, RMQ, RMQ), mve_vmladav),
25721 mCEF(vmladava, _vmladava, 3, (RRe, RMQ, RMQ), mve_vmladav),
25722 mCEF(vmladavx, _vmladavx, 3, (RRe, RMQ, RMQ), mve_vmladav),
25723 mCEF(vmladavax, _vmladavax, 3, (RRe, RMQ, RMQ), mve_vmladav),
25724 mCEF(vmlav, _vmladav, 3, (RRe, RMQ, RMQ), mve_vmladav),
25725 mCEF(vmlava, _vmladava, 3, (RRe, RMQ, RMQ), mve_vmladav),
25726 mCEF(vmlsdav, _vmlsdav, 3, (RRe, RMQ, RMQ), mve_vmladav),
25727 mCEF(vmlsdava, _vmlsdava, 3, (RRe, RMQ, RMQ), mve_vmladav),
25728 mCEF(vmlsdavx, _vmlsdavx, 3, (RRe, RMQ, RMQ), mve_vmladav),
25729 mCEF(vmlsdavax, _vmlsdavax, 3, (RRe, RMQ, RMQ), mve_vmladav),
25730
25731 mCEF(vst20, _vst20, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
25732 mCEF(vst21, _vst21, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
25733 mCEF(vst40, _vst40, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
25734 mCEF(vst41, _vst41, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
25735 mCEF(vst42, _vst42, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
25736 mCEF(vst43, _vst43, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
25737 mCEF(vld20, _vld20, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
25738 mCEF(vld21, _vld21, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
25739 mCEF(vld40, _vld40, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
25740 mCEF(vld41, _vld41, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
25741 mCEF(vld42, _vld42, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
25742 mCEF(vld43, _vld43, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
25743 mCEF(vstrb, _vstrb, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
25744 mCEF(vstrh, _vstrh, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
25745 mCEF(vstrw, _vstrw, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
25746 mCEF(vstrd, _vstrd, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
25747 mCEF(vldrb, _vldrb, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
25748 mCEF(vldrh, _vldrh, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
25749 mCEF(vldrw, _vldrw, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
25750 mCEF(vldrd, _vldrd, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
25751
25752 mCEF(vmovnt, _vmovnt, 2, (RMQ, RMQ), mve_movn),
25753 mCEF(vmovnb, _vmovnb, 2, (RMQ, RMQ), mve_movn),
25754 mCEF(vbrsr, _vbrsr, 3, (RMQ, RMQ, RR), mve_vbrsr),
25755 mCEF(vaddlv, _vaddlv, 3, (RRe, RRo, RMQ), mve_vaddlv),
25756 mCEF(vaddlva, _vaddlva, 3, (RRe, RRo, RMQ), mve_vaddlv),
25757 mCEF(vaddv, _vaddv, 2, (RRe, RMQ), mve_vaddv),
25758 mCEF(vaddva, _vaddva, 2, (RRe, RMQ), mve_vaddv),
25759 mCEF(vddup, _vddup, 3, (RMQ, RRe, EXPi), mve_viddup),
25760 mCEF(vdwdup, _vdwdup, 4, (RMQ, RRe, RR, EXPi), mve_viddup),
25761 mCEF(vidup, _vidup, 3, (RMQ, RRe, EXPi), mve_viddup),
25762 mCEF(viwdup, _viwdup, 4, (RMQ, RRe, RR, EXPi), mve_viddup),
25763 mToC("vmaxa", ee330e81, 2, (RMQ, RMQ), mve_vmaxa_vmina),
25764 mToC("vmina", ee331e81, 2, (RMQ, RMQ), mve_vmaxa_vmina),
25765 mCEF(vmaxv, _vmaxv, 2, (RR, RMQ), mve_vmaxv),
25766 mCEF(vmaxav, _vmaxav, 2, (RR, RMQ), mve_vmaxv),
25767 mCEF(vminv, _vminv, 2, (RR, RMQ), mve_vmaxv),
25768 mCEF(vminav, _vminav, 2, (RR, RMQ), mve_vmaxv),
25769
25770 mCEF(vmlaldav, _vmlaldav, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25771 mCEF(vmlaldava, _vmlaldava, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25772 mCEF(vmlaldavx, _vmlaldavx, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25773 mCEF(vmlaldavax, _vmlaldavax, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25774 mCEF(vmlalv, _vmlaldav, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25775 mCEF(vmlalva, _vmlaldava, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25776 mCEF(vmlsldav, _vmlsldav, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25777 mCEF(vmlsldava, _vmlsldava, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25778 mCEF(vmlsldavx, _vmlsldavx, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25779 mCEF(vmlsldavax, _vmlsldavax, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25780 mToC("vrmlaldavh", ee800f00, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25781 mToC("vrmlaldavha",ee800f20, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25782 mCEF(vrmlaldavhx, _vrmlaldavhx, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25783 mCEF(vrmlaldavhax, _vrmlaldavhax, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25784 mToC("vrmlalvh", ee800f00, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25785 mToC("vrmlalvha", ee800f20, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25786 mCEF(vrmlsldavh, _vrmlsldavh, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25787 mCEF(vrmlsldavha, _vrmlsldavha, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25788 mCEF(vrmlsldavhx, _vrmlsldavhx, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25789 mCEF(vrmlsldavhax, _vrmlsldavhax, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25790
25791 mToC("vmlas", ee011e40, 3, (RMQ, RMQ, RR), mve_vmlas),
25792 mToC("vmulh", ee010e01, 3, (RMQ, RMQ, RMQ), mve_vmulh),
25793 mToC("vrmulh", ee011e01, 3, (RMQ, RMQ, RMQ), mve_vmulh),
25794 mToC("vpnot", fe310f4d, 0, (), mve_vpnot),
25795 mToC("vpsel", fe310f01, 3, (RMQ, RMQ, RMQ), mve_vpsel),
25796
25797 mToC("vqdmladh", ee000e00, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
25798 mToC("vqdmladhx", ee001e00, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
25799 mToC("vqrdmladh", ee000e01, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
25800 mToC("vqrdmladhx",ee001e01, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
25801 mToC("vqdmlsdh", fe000e00, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
25802 mToC("vqdmlsdhx", fe001e00, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
25803 mToC("vqrdmlsdh", fe000e01, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
25804 mToC("vqrdmlsdhx",fe001e01, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
25805 mToC("vqdmlah", ee000e60, 3, (RMQ, RMQ, RR), mve_vqdmlah),
25806 mToC("vqdmlash", ee001e60, 3, (RMQ, RMQ, RR), mve_vqdmlah),
25807 mToC("vqrdmlash", ee001e40, 3, (RMQ, RMQ, RR), mve_vqdmlah),
25808 mToC("vqdmullt", ee301f00, 3, (RMQ, RMQ, RMQRR), mve_vqdmull),
25809 mToC("vqdmullb", ee300f00, 3, (RMQ, RMQ, RMQRR), mve_vqdmull),
25810 mCEF(vqmovnt, _vqmovnt, 2, (RMQ, RMQ), mve_vqmovn),
25811 mCEF(vqmovnb, _vqmovnb, 2, (RMQ, RMQ), mve_vqmovn),
25812 mCEF(vqmovunt, _vqmovunt, 2, (RMQ, RMQ), mve_vqmovn),
25813 mCEF(vqmovunb, _vqmovunb, 2, (RMQ, RMQ), mve_vqmovn),
25814
25815 mCEF(vshrnt, _vshrnt, 3, (RMQ, RMQ, I32z), mve_vshrn),
25816 mCEF(vshrnb, _vshrnb, 3, (RMQ, RMQ, I32z), mve_vshrn),
25817 mCEF(vrshrnt, _vrshrnt, 3, (RMQ, RMQ, I32z), mve_vshrn),
25818 mCEF(vrshrnb, _vrshrnb, 3, (RMQ, RMQ, I32z), mve_vshrn),
25819 mCEF(vqshrnt, _vqrshrnt, 3, (RMQ, RMQ, I32z), mve_vshrn),
25820 mCEF(vqshrnb, _vqrshrnb, 3, (RMQ, RMQ, I32z), mve_vshrn),
25821 mCEF(vqshrunt, _vqrshrunt, 3, (RMQ, RMQ, I32z), mve_vshrn),
25822 mCEF(vqshrunb, _vqrshrunb, 3, (RMQ, RMQ, I32z), mve_vshrn),
25823 mCEF(vqrshrnt, _vqrshrnt, 3, (RMQ, RMQ, I32z), mve_vshrn),
25824 mCEF(vqrshrnb, _vqrshrnb, 3, (RMQ, RMQ, I32z), mve_vshrn),
25825 mCEF(vqrshrunt, _vqrshrunt, 3, (RMQ, RMQ, I32z), mve_vshrn),
25826 mCEF(vqrshrunb, _vqrshrunb, 3, (RMQ, RMQ, I32z), mve_vshrn),
25827
25828 mToC("vshlc", eea00fc0, 3, (RMQ, RR, I32z), mve_vshlc),
25829 mToC("vshllt", ee201e00, 3, (RMQ, RMQ, I32), mve_vshll),
25830 mToC("vshllb", ee200e00, 3, (RMQ, RMQ, I32), mve_vshll),
25831
25832 toU("dlstp", _dlstp, 2, (LR, RR), t_loloop),
25833 toU("wlstp", _wlstp, 3, (LR, RR, EXP), t_loloop),
25834 toU("letp", _letp, 2, (LR, EXP), t_loloop),
25835 toU("lctp", _lctp, 0, (), t_loloop),
25836
25837#undef THUMB_VARIANT
25838#define THUMB_VARIANT & mve_fp_ext
25839 mToC("vcmul", ee300e00, 4, (RMQ, RMQ, RMQ, EXPi), mve_vcmul),
25840 mToC("vfmas", ee311e40, 3, (RMQ, RMQ, RR), mve_vfmas),
25841 mToC("vmaxnma", ee3f0e81, 2, (RMQ, RMQ), mve_vmaxnma_vminnma),
25842 mToC("vminnma", ee3f1e81, 2, (RMQ, RMQ), mve_vmaxnma_vminnma),
25843 mToC("vmaxnmv", eeee0f00, 2, (RR, RMQ), mve_vmaxnmv),
25844 mToC("vmaxnmav",eeec0f00, 2, (RR, RMQ), mve_vmaxnmv),
25845 mToC("vminnmv", eeee0f80, 2, (RR, RMQ), mve_vmaxnmv),
25846 mToC("vminnmav",eeec0f80, 2, (RR, RMQ), mve_vmaxnmv),
25847
25848#undef ARM_VARIANT
25849#define ARM_VARIANT & fpu_vfp_ext_v1
25850#undef THUMB_VARIANT
25851#define THUMB_VARIANT & arm_ext_v6t2
25852 mnCEF(vmla, _vmla, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), neon_mac_maybe_scalar),
25853 mnCEF(vmul, _vmul, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), neon_mul),
25854
25855 mcCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
25856
25857#undef ARM_VARIANT
25858#define ARM_VARIANT & fpu_vfp_ext_v1xd
25859
25860 MNCE(vmov, 0, 1, (VMOV), neon_mov),
25861 mcCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
25862 mcCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
25863 mcCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
25864
25865 mCEF(vmullt, _vmullt, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ), mve_vmull),
25866 mnCEF(vadd, _vadd, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_addsub_if_i),
25867 mnCEF(vsub, _vsub, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_addsub_if_i),
25868
25869 MNCEF(vabs, 1b10300, 2, (RNSDQMQ, RNSDQMQ), neon_abs_neg),
25870 MNCEF(vneg, 1b10380, 2, (RNSDQMQ, RNSDQMQ), neon_abs_neg),
25871
25872 mCEF(vmovlt, _vmovlt, 1, (VMOV), mve_movl),
25873 mCEF(vmovlb, _vmovlb, 1, (VMOV), mve_movl),
25874
25875 mnCE(vcmp, _vcmp, 3, (RVSD_COND, RSVDMQ_FI0, oRMQRZ), vfp_nsyn_cmp),
25876 mnCE(vcmpe, _vcmpe, 3, (RVSD_COND, RSVDMQ_FI0, oRMQRZ), vfp_nsyn_cmp),
25877
25878#undef ARM_VARIANT
25879#define ARM_VARIANT & fpu_vfp_ext_v2
25880
25881 mcCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
25882 mcCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
25883 mcCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
25884 mcCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
25885
25886#undef ARM_VARIANT
25887#define ARM_VARIANT & fpu_vfp_ext_armv8xd
25888 mnUF(vcvta, _vcvta, 2, (RNSDQMQ, oRNSDQMQ), neon_cvta),
25889 mnUF(vcvtp, _vcvta, 2, (RNSDQMQ, oRNSDQMQ), neon_cvtp),
25890 mnUF(vcvtn, _vcvta, 3, (RNSDQMQ, oRNSDQMQ, oI32z), neon_cvtn),
25891 mnUF(vcvtm, _vcvta, 2, (RNSDQMQ, oRNSDQMQ), neon_cvtm),
25892 mnUF(vmaxnm, _vmaxnm, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ), vmaxnm),
25893 mnUF(vminnm, _vminnm, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ), vmaxnm),
25894
25895#undef ARM_VARIANT
25896#define ARM_VARIANT & fpu_neon_ext_v1
25897 mnUF(vabd, _vabd, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
25898 mnUF(vabdl, _vabdl, 3, (RNQMQ, RNDMQ, RNDMQ), neon_dyadic_long),
25899 mnUF(vaddl, _vaddl, 3, (RNQMQ, RNDMQ, RNDMQR), neon_dyadic_long),
25900 mnUF(vsubl, _vsubl, 3, (RNQMQ, RNDMQ, RNDMQR), neon_dyadic_long),
25901 mnUF(vand, _vand, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
25902 mnUF(vbic, _vbic, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
25903 mnUF(vorr, _vorr, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
25904 mnUF(vorn, _vorn, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
25905 mnUF(veor, _veor, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_logic),
25906 MNUF(vcls, 1b00400, 2, (RNDQMQ, RNDQMQ), neon_cls),
25907 MNUF(vclz, 1b00480, 2, (RNDQMQ, RNDQMQ), neon_clz),
25908 mnCE(vdup, _vdup, 2, (RNDQMQ, RR_RNSC), neon_dup),
25909 MNUF(vhadd, 00000000, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i_su),
25910 MNUF(vrhadd, 00000100, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_i_su),
25911 MNUF(vhsub, 00000200, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i_su),
25912 mnUF(vmin, _vmin, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
25913 mnUF(vmax, _vmax, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
25914 MNUF(vqadd, 0000010, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i64_su),
25915 MNUF(vqsub, 0000210, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i64_su),
25916 mnUF(vmvn, _vmvn, 2, (RNDQMQ, RNDQMQ_Ibig), neon_mvn),
25917 MNUF(vqabs, 1b00700, 2, (RNDQMQ, RNDQMQ), neon_sat_abs_neg),
25918 MNUF(vqneg, 1b00780, 2, (RNDQMQ, RNDQMQ), neon_sat_abs_neg),
25919 mnUF(vqrdmlah, _vqrdmlah,3, (RNDQMQ, oRNDQMQ, RNDQ_RNSC_RR), neon_qrdmlah),
25920 mnUF(vqdmulh, _vqdmulh, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_RNSC_RR), neon_qdmulh),
25921 mnUF(vqrdmulh, _vqrdmulh,3, (RNDQMQ, oRNDQMQ, RNDQMQ_RNSC_RR), neon_qdmulh),
25922 MNUF(vqrshl, 0000510, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_rshl),
25923 MNUF(vrshl, 0000500, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_rshl),
25924 MNUF(vshr, 0800010, 3, (RNDQMQ, oRNDQMQ, I64z), neon_rshift_round_imm),
25925 MNUF(vrshr, 0800210, 3, (RNDQMQ, oRNDQMQ, I64z), neon_rshift_round_imm),
25926 MNUF(vsli, 1800510, 3, (RNDQMQ, oRNDQMQ, I63), neon_sli),
25927 MNUF(vsri, 1800410, 3, (RNDQMQ, oRNDQMQ, I64z), neon_sri),
25928 MNUF(vrev64, 1b00000, 2, (RNDQMQ, RNDQMQ), neon_rev),
25929 MNUF(vrev32, 1b00080, 2, (RNDQMQ, RNDQMQ), neon_rev),
25930 MNUF(vrev16, 1b00100, 2, (RNDQMQ, RNDQMQ), neon_rev),
25931 mnUF(vshl, _vshl, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_I63b_RR), neon_shl),
25932 mnUF(vqshl, _vqshl, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_I63b_RR), neon_qshl),
25933 MNUF(vqshlu, 1800610, 3, (RNDQMQ, oRNDQMQ, I63), neon_qshlu_imm),
25934
25935#undef ARM_VARIANT
25936#define ARM_VARIANT & arm_ext_v8_3
25937#undef THUMB_VARIANT
25938#define THUMB_VARIANT & arm_ext_v6t2_v8m
25939 MNUF (vcadd, 0, 4, (RNDQMQ, RNDQMQ, RNDQMQ, EXPi), vcadd),
25940 MNUF (vcmla, 0, 4, (RNDQMQ, RNDQMQ, RNDQMQ_RNSC, EXPi), vcmla),
25941
25942#undef ARM_VARIANT
25943#define ARM_VARIANT &arm_ext_bf16
25944#undef THUMB_VARIANT
25945#define THUMB_VARIANT &arm_ext_bf16
25946 TUF ("vdot", c000d00, fc000d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), vdot, vdot),
25947 TUF ("vmmla", c000c40, fc000c40, 3, (RNQ, RNQ, RNQ), vmmla, vmmla),
25948 TUF ("vfmab", c300810, fc300810, 3, (RNDQ, RNDQ, RNDQ_RNSC), bfloat_vfma, bfloat_vfma),
25949
25950#undef ARM_VARIANT
25951#define ARM_VARIANT &arm_ext_i8mm
25952#undef THUMB_VARIANT
25953#define THUMB_VARIANT &arm_ext_i8mm
25954 TUF ("vsmmla", c200c40, fc200c40, 3, (RNQ, RNQ, RNQ), vsmmla, vsmmla),
25955 TUF ("vummla", c200c50, fc200c50, 3, (RNQ, RNQ, RNQ), vummla, vummla),
25956 TUF ("vusmmla", ca00c40, fca00c40, 3, (RNQ, RNQ, RNQ), vummla, vummla),
25957 TUF ("vusdot", c800d00, fc800d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), vusdot, vusdot),
25958 TUF ("vsudot", c800d10, fc800d10, 3, (RNDQ, RNDQ, RNSC), vsudot, vsudot),
25959};
25960#undef ARM_VARIANT
25961#undef THUMB_VARIANT
25962#undef TCE
25963#undef TUE
25964#undef TUF
25965#undef TCC
25966#undef cCE
25967#undef cCL
25968#undef C3E
25969#undef C3
25970#undef CE
25971#undef CM
25972#undef CL
25973#undef UE
25974#undef UF
25975#undef UT
25976#undef NUF
25977#undef nUF
25978#undef NCE
25979#undef nCE
25980#undef OPS0
25981#undef OPS1
25982#undef OPS2
25983#undef OPS3
25984#undef OPS4
25985#undef OPS5
25986#undef OPS6
25987#undef do_0
25988#undef ToC
25989#undef toC
25990#undef ToU
25991#undef toU
25992\f
25993/* MD interface: bits in the object file. */
25994
25995/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
25996 for use in the a.out file, and stores them in the array pointed to by buf.
25997 This knows about the endian-ness of the target machine and does
25998 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
   2 (short) and 4 (long).  Floating numbers are put out as a series of
26000 LITTLENUMS (shorts, here at least). */
26001
26002void
26003md_number_to_chars (char * buf, valueT val, int n)
26004{
26005 if (target_big_endian)
26006 number_to_chars_bigendian (buf, val, n);
26007 else
26008 number_to_chars_littleendian (buf, val, n);
26009}
26010
26011static valueT
26012md_chars_to_number (char * buf, int n)
26013{
26014 valueT result = 0;
26015 unsigned char * where = (unsigned char *) buf;
26016
26017 if (target_big_endian)
26018 {
26019 while (n--)
26020 {
26021 result <<= 8;
26022 result |= (*where++ & 255);
26023 }
26024 }
26025 else
26026 {
26027 while (n--)
26028 {
26029 result <<= 8;
26030 result |= (where[n] & 255);
26031 }
26032 }
26033
26034 return result;
26035}
26036
26037/* MD interface: Sections. */
26038
26039/* Calculate the maximum variable size (i.e., excluding fr_fix)
26040 that an rs_machine_dependent frag may reach. */
26041
unsigned int
arm_frag_max_var (fragS *fragp)
{
  /* We only use rs_machine_dependent for variable-size Thumb instructions,
     which are either THUMB_SIZE (2) or INSN_SIZE (4).

     Note that we generate relaxable instructions even for cases that don't
     really need it, like an immediate that's a trivial constant.  So we're
     overestimating the instruction size for some of those cases.  Rather
     than putting more intelligence here, it would probably be better to
     avoid generating a relaxation frag in the first place when it can be
     determined up front that a short instruction will suffice.  */

  gas_assert (fragp->fr_type == rs_machine_dependent);
  /* Worst case is the 32-bit (INSN_SIZE) encoding.  */
  return INSN_SIZE;
}
26058
26059/* Estimate the size of a frag before relaxing. Assume everything fits in
26060 2 bytes. */
26061
26062int
26063md_estimate_size_before_relax (fragS * fragp,
26064 segT segtype ATTRIBUTE_UNUSED)
26065{
26066 fragp->fr_var = 2;
26067 return 2;
26068}
26069
26070/* Convert a machine dependent frag. */
26071
void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The relaxed instruction lives at the end of the fixed part of the
     frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* Read back the narrow (16-bit) encoding originally emitted; its
     register fields are transplanted into the wide encoding below.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  /* fr_var is the size chosen by relaxation: 2 keeps the narrow
     encoding in place, 4 rewrites the instruction as 32-bit Thumb-2.
     In both cases a fixup with the matching relocation is emitted so
     the immediate/offset is filled in later.  */
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Top nibble 4 or 9 identifies the SP/PC-relative narrow
	     forms, whose Rt field sits at bits 8-10; the other narrow
	     forms keep Rt in bits 0-2 and Rn in bits 3-5.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the PC-relative literal load is a pc-relative fixup.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Compensate for the pipeline offset already implied by the
	     narrow encoding.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs place Rd at bit 8; cmp/cmn re-use the field as
	     Rn at bit 16, hence the extra shift of 8.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Move the condition field into bits 22-25 of the wide
	     encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 is the S bit; the flag-setting forms use the
	     T32_IMMEDIATE reloc, the others the ADD_IMM reloc.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  /* Emit the fixup for the (possibly rewritten) instruction and
     account for its bytes in the fixed part of the frag.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
26243
26244/* Return the size of a relaxable immediate operand instruction.
26245 SHIFT and SIZE specify the form of the allowable immediate. */
26246static int
26247relax_immediate (fragS *fragp, int size, int shift)
26248{
26249 offsetT offset;
26250 offsetT mask;
26251 offsetT low;
26252
26253 /* ??? Should be able to do better than this. */
26254 if (fragp->fr_symbol)
26255 return 4;
26256
26257 low = (1 << shift) - 1;
26258 mask = (1 << (shift + size)) - (1 << shift);
26259 offset = fragp->fr_offset;
26260 /* Force misaligned offsets to 32-bit variant. */
26261 if (offset & low)
26262 return 4;
26263 if (offset & ~mask)
26264 return 4;
26265 return 2;
26266}
26267
26268/* Get the address of a symbol during relaxation. */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  /* Absolute symbols must live in the dummy zero-address frag;
     anything else indicates internal confusion.  */
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* An alignment frag absorbs stretch up to its alignment
		 boundary; round STRETCH towards zero accordingly.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* All movement absorbed: the symbol will not move.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* Only apply the (possibly reduced) stretch if the symbol's
	 frag is actually ahead of us.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
26317
26318/* Return the size of a relaxable adr pseudo-instruction or PC-relative
26319 load. */
static int
relax_adr (fragS *fragp, asection *sec, long stretch)
{
  addressT addr;
  offsetT val;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (fragp->fr_symbol == NULL
      || !S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix;
  /* The base is Align(PC, 4): the instruction address plus the +4
     pipeline offset, rounded down to a word boundary.  */
  addr = (addr + 4) & ~3;
  /* Force misaligned targets to 32-bit variant.  */
  if (val & 3)
    return 4;
  val -= addr;
  /* The narrow form encodes an unsigned offset of at most 1020
     bytes.  */
  if (val < 0 || val > 1020)
    return 4;
  return 2;
}
26344
26345/* Return the size of a relaxable add/sub immediate instruction. */
26346static int
26347relax_addsub (fragS *fragp, asection *sec)
26348{
26349 char *buf;
26350 int op;
26351
26352 buf = fragp->fr_literal + fragp->fr_fix;
26353 op = bfd_get_16(sec->owner, buf);
26354 if ((op & 0xf) == ((op >> 4) & 0xf))
26355 return relax_immediate (fragp, 8, 0);
26356 else
26357 return relax_immediate (fragp, 3, 0);
26358}
26359
26360/* Return TRUE iff the definition of symbol S could be pre-empted
26361 (overridden) at link or load time. */
26362static bfd_boolean
26363symbol_preemptible (symbolS *s)
26364{
26365 /* Weak symbols can always be pre-empted. */
26366 if (S_IS_WEAK (s))
26367 return TRUE;
26368
26369 /* Non-global symbols cannot be pre-empted. */
26370 if (! S_IS_EXTERNAL (s))
26371 return FALSE;
26372
26373#ifdef OBJ_ELF
26374 /* In ELF, a global symbol can be marked protected, or private. In that
26375 case it can't be pre-empted (other definitions in the same link unit
26376 would violate the ODR). */
26377 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
26378 return FALSE;
26379#endif
26380
26381 /* Other global symbols might be pre-empted. */
26382 return TRUE;
26383}
26384
26385/* Return the size of a relaxable branch instruction. BITS is the
26386 size of the offset field in the narrow instruction. */
26387
static int
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
{
  addressT addr;
  offsetT val;
  offsetT limit;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

#ifdef OBJ_ELF
  /* A branch to a function in ARM state will require interworking.  */
  if (S_IS_DEFINED (fragp->fr_symbol)
      && ARM_IS_FUNC (fragp->fr_symbol))
    return 4;
#endif

  /* A pre-emptible target may be rebound by the linker, so keep the
     long form to leave room for any destination.  */
  if (symbol_preemptible (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  /* Branch offsets are relative to the instruction address plus the
     +4 Thumb pipeline offset.  */
  addr = fragp->fr_address + fragp->fr_fix + 4;
  val -= addr;

  /* Offset is a signed value *2 */
  limit = 1 << bits;
  if (val >= limit || val < -limit)
    return 4;
  return 2;
}
26421
26422
26423/* Relax a machine dependent frag. This returns the amount by which
26424 the current size of the frag should change. */
26425
int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* Dispatch on the mnemonic recorded in fr_subtype; each helper
     returns the size (2 or 4) the instruction needs this pass.  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      /* Commit the wide encoding now and stop relaxing this frag.  */
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
26501
26502/* Round up a section size to the appropriate boundary. */
26503
26504valueT
26505md_section_align (segT segment ATTRIBUTE_UNUSED,
26506 valueT size)
26507{
26508 return size;
26509}
26510
26511/* This is called from HANDLE_ALIGN in write.c. Fill in the contents
26512 of an rs_align_code fragment. */
26513
void
arm_handle_align (fragS * fragP)
{
  /* NOP encodings, indexed by [architecture variant][endianness].  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding to fill between this frag and the next.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  /* Choose the noop pattern from the mode (ARM/Thumb) recorded for
     this frag and the selected CPU's capabilities.  */
  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Pad any leading misalignment with zero bytes (marked as data in
     the ELF mapping symbols) so the noops land on an instruction
     boundary.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  /* Fill the rest of the gap with full-size noops.  */
  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
26630
26631/* Called from md_do_align. Used to create an alignment
26632 frag in a code section. */
26633
26634void
26635arm_frag_align_code (int n, int max)
26636{
26637 char * p;
26638
26639 /* We assume that there will never be a requirement
26640 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
26641 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
26642 {
26643 char err_msg[128];
26644
26645 sprintf (err_msg,
26646 _("alignments greater than %d bytes not supported in .text sections."),
26647 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
26648 as_fatal ("%s", err_msg);
26649 }
26650
26651 p = frag_var (rs_align_code,
26652 MAX_MEM_FOR_RS_ALIGN_CODE,
26653 1,
26654 (relax_substateT) max,
26655 (symbolS *) NULL,
26656 (offsetT) n,
26657 (char *) NULL);
26658 *p = 0;
26659}
26660
26661/* Perform target specific initialisation of a frag.
26662 Note - despite the name this initialisation is not done when the frag
26663 is created, but only when its type is assigned. A frag can be created
26664 and used a long time before its type is set, so beware of assuming that
26665 this initialisation is performed first. */
26666
26667#ifndef OBJ_ELF
26668void
26669arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
26670{
26671 /* Record whether this frag is in an ARM or a THUMB area. */
26672 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
26673}
26674
26675#else /* OBJ_ELF is defined. */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  bfd_boolean frag_thumb_mode;

  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* XOR strips the MODE_RECORDED marker, leaving just the recorded
     thumb flag.  */
  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;

  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  switch (fragP->fr_type)
    {
    case rs_align:
    case rs_align_test:
    case rs_fill:
      /* Alignment padding and fills are data, not instructions.  */
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
      break;
    default:
      break;
    }
}
26709
26710/* When we change sections we need to issue a new mapping symbol. */
26711
26712void
26713arm_elf_change_section (void)
26714{
26715 /* Link an unlinked unwind index table section to the .text section. */
26716 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
26717 && elf_linked_to_section (now_seg) == NULL)
26718 elf_linked_to_section (now_seg) = text_section;
26719}
26720
26721int
26722arm_elf_section_type (const char * str, size_t len)
26723{
26724 if (len == 5 && strncmp (str, "exidx", 5) == 0)
26725 return SHT_ARM_EXIDX;
26726
26727 return -1;
26728}
26729\f
26730/* Code to deal with unwinding tables. */
26731
26732static void add_unwind_adjustsp (offsetT);
26733
26734/* Generate any deferred unwind frame offset. */
26735
26736static void
26737flush_pending_unwind (void)
26738{
26739 offsetT offset;
26740
26741 offset = unwind.pending_offset;
26742 unwind.pending_offset = 0;
26743 if (offset != 0)
26744 add_unwind_adjustsp (offset);
26745}
26746
26747/* Add an opcode to this list for this function. Two-byte opcodes should
26748 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
26749 order. */
26750
26751static void
26752add_unwind_opcode (valueT op, int length)
26753{
26754 /* Add any deferred stack adjustment. */
26755 if (unwind.pending_offset)
26756 flush_pending_unwind ();
26757
26758 unwind.sp_restored = 0;
26759
26760 if (unwind.opcode_count + length > unwind.opcode_alloc)
26761 {
26762 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
26763 if (unwind.opcodes)
26764 unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
26765 unwind.opcode_alloc);
26766 else
26767 unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
26768 }
26769 while (length > 0)
26770 {
26771 length--;
26772 unwind.opcodes[unwind.opcode_count] = op & 0xff;
26773 op >>= 8;
26774 unwind.opcode_count++;
26775 }
26776}
26777
26778/* Add unwind opcodes to adjust the stack pointer. */
26779
static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      /* A zero uleb128 still needs one byte emitted explicitly.  */
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  /* Continuation bit on every byte except the last.  */
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit 0x7f (sp -= 0x204... maximal step)
	 opcodes until the remainder fits one short decrement.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
26839
26840/* Finish the list of unwind opcodes for this function. */
26841
26842static void
26843finish_unwind_opcodes (void)
26844{
26845 valueT op;
26846
26847 if (unwind.fp_used)
26848 {
26849 /* Adjust sp as necessary. */
26850 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
26851 flush_pending_unwind ();
26852
26853 /* After restoring sp from the frame pointer. */
26854 op = 0x90 | unwind.fp_reg;
26855 add_unwind_opcode (op, 1);
26856 }
26857 else
26858 flush_pending_unwind ();
26859}
26860
26861
26862/* Start an exception table entry. If idx is nonzero this is an index table
26863 entry. */
26864
static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  /* Index tables go in .ARM.exidx*, unwind data in .ARM.extab*.  */
  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* The unwind section name is the prefix plus the text section's
     name, with ".text" itself mapping to the bare prefix.  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  /* Switch to (creating if necessary) the unwind section.  */
  obj_elf_change_section (sec_name, type, 0, flags, 0, group_name,
			  linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
26929
26930
26931/* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
26932 personality routine data. Returns zero, or the index table value for
26933 an inline entry. */
26934
static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      /* personality_index == -2 marks a .cantunwind frame.  */
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  /* __aeabi_unwind_cpp_pr0 holds only 3 opcode bytes; fall
	     back to pr1 for longer opcode sequences.  */
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* SIZE becomes the number of extra 32-bit words after the first.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* Table entries must be word-aligned.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      /* Current word full: flush it and start a fresh one.  */
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
27099
27100
27101/* Initialize the DWARF-2 unwind information for this procedure. */
27102
27103void
27104tc_arm_frame_initial_instructions (void)
27105{
27106 cfi_add_CFA_def_cfa (REG_SP, 0);
27107}
27108#endif /* OBJ_ELF */
27109
27110/* Convert REGNAME to a DWARF-2 register number. */
27111
27112int
27113tc_arm_regname_to_dw2regnum (char *regname)
27114{
27115 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
27116 if (reg != FAIL)
27117 return reg;
27118
27119 /* PR 16694: Allow VFP registers as well. */
27120 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
27121 if (reg != FAIL)
27122 return 64 + reg;
27123
27124 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
27125 if (reg != FAIL)
27126 return reg + 256;
27127
27128 return FAIL;
27129}
27130
27131#ifdef TE_PE
27132void
27133tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
27134{
27135 expressionS exp;
27136
27137 exp.X_op = O_secrel;
27138 exp.X_add_symbol = symbol;
27139 exp.X_add_number = 0;
27140 emit_expr (&exp, size);
27141}
27142#endif
27143
27144/* MD interface: Symbol and relocation handling. */
27145
27146/* Return the address within the segment that a PC-relative fixup is
27147 relative to. For ARM, PC-relative fixups applied to instructions
27148 are generally relative to the location of the fixup plus 8 bytes.
27149 Thumb branches are offset by 4, and Thumb loads relative to PC
27150 require special handling. */
27151
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* BASE is the address of the fixup itself; each reloc type below
     adds its own pipeline offset to it.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* A BL to a same-section ARM function on >=v5T will be turned
	 into a BLX, so restore the full base for it.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
27279
27280static bfd_boolean flag_warn_syms = TRUE;
27281
27282bfd_boolean
27283arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
27284{
27285 /* PR 18347 - Warn if the user attempts to create a symbol with the same
27286 name as an ARM instruction. Whilst strictly speaking it is allowed, it
27287 does mean that the resulting code might be very confusing to the reader.
27288 Also this warning can be triggered if the user omits an operand before
27289 an immediate address, eg:
27290
27291 LDR =foo
27292
27293 GAS treats this as an assignment of the value of the symbol foo to a
27294 symbol LDR, and so (without this code) it will not issue any kind of
27295 warning or error message.
27296
27297 Note - ARM instructions are case-insensitive but the strings in the hash
27298 table are all stored in lower case, so we must first ensure that name is
27299 lower case too. */
27300 if (flag_warn_syms && arm_ops_hsh)
27301 {
27302 char * nbuf = strdup (name);
27303 char * p;
27304
27305 for (p = nbuf; *p; p++)
27306 *p = TOLOWER (*p);
27307 if (hash_find (arm_ops_hsh, nbuf) != NULL)
27308 {
27309 static struct hash_control * already_warned = NULL;
27310
27311 if (already_warned == NULL)
27312 already_warned = hash_new ();
27313 /* Only warn about the symbol once. To keep the code
27314 simple we let hash_insert do the lookup for us. */
27315 if (hash_insert (already_warned, nbuf, NULL) == NULL)
27316 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
27317 }
27318 else
27319 free (nbuf);
27320 }
27321
27322 return FALSE;
27323}
27324
27325/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
27326 Otherwise we have no need to default values of symbols. */
27327
27328symbolS *
27329md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
27330{
27331#ifdef OBJ_ELF
27332 if (name[0] == '_' && name[1] == 'G'
27333 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
27334 {
27335 if (!GOT_symbol)
27336 {
27337 if (symbol_find (name))
27338 as_bad (_("GOT already in the symbol table"));
27339
27340 GOT_symbol = symbol_new (name, undefined_section,
27341 (valueT) 0, & zero_address_frag);
27342 }
27343
27344 return GOT_symbol;
27345 }
27346#endif
27347
27348 return NULL;
27349}
27350
27351/* Subroutine of md_apply_fix. Check to see if an immediate can be
27352 computed as two separate immediate values, added together. We
27353 already know that this value cannot be computed by just one ARM
27354 instruction. */
27355
27356static unsigned int
27357validate_immediate_twopart (unsigned int val,
27358 unsigned int * highpart)
27359{
27360 unsigned int a;
27361 unsigned int i;
27362
27363 for (i = 0; i < 32; i += 2)
27364 if (((a = rotate_left (val, i)) & 0xff) != 0)
27365 {
27366 if (a & 0xff00)
27367 {
27368 if (a & ~ 0xffff)
27369 continue;
27370 * highpart = (a >> 8) | ((i + 24) << 7);
27371 }
27372 else if (a & 0xff0000)
27373 {
27374 if (a & 0xff000000)
27375 continue;
27376 * highpart = (a >> 16) | ((i + 16) << 7);
27377 }
27378 else
27379 {
27380 gas_assert (a & 0xff000000);
27381 * highpart = (a >> 24) | ((i + 8) << 7);
27382 }
27383
27384 return (a & 0xff) | (i << 7);
27385 }
27386
27387 return FAIL;
27388}
27389
27390static int
27391validate_offset_imm (unsigned int val, int hwse)
27392{
27393 if ((hwse && val > 255) || val > 4095)
27394 return FAIL;
27395 return val;
27396}
27397
27398/* Subroutine of md_apply_fix. Do those data_ops which can take a
27399 negative immediate constant by altering the instruction. A bit of
27400 a hack really.
27401 MOV <-> MVN
27402 AND <-> BIC
27403 ADC <-> SBC
27404 by inverting the second operand, and
27405 ADD <-> SUB
27406 CMP <-> CMN
27407 by negating the second operand. */
27408
27409static int
27410negate_data_op (unsigned long * instruction,
27411 unsigned long value)
27412{
27413 int op, new_inst;
27414 unsigned long negated, inverted;
27415
27416 negated = encode_arm_immediate (-value);
27417 inverted = encode_arm_immediate (~value);
27418
27419 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
27420 switch (op)
27421 {
27422 /* First negates. */
27423 case OPCODE_SUB: /* ADD <-> SUB */
27424 new_inst = OPCODE_ADD;
27425 value = negated;
27426 break;
27427
27428 case OPCODE_ADD:
27429 new_inst = OPCODE_SUB;
27430 value = negated;
27431 break;
27432
27433 case OPCODE_CMP: /* CMP <-> CMN */
27434 new_inst = OPCODE_CMN;
27435 value = negated;
27436 break;
27437
27438 case OPCODE_CMN:
27439 new_inst = OPCODE_CMP;
27440 value = negated;
27441 break;
27442
27443 /* Now Inverted ops. */
27444 case OPCODE_MOV: /* MOV <-> MVN */
27445 new_inst = OPCODE_MVN;
27446 value = inverted;
27447 break;
27448
27449 case OPCODE_MVN:
27450 new_inst = OPCODE_MOV;
27451 value = inverted;
27452 break;
27453
27454 case OPCODE_AND: /* AND <-> BIC */
27455 new_inst = OPCODE_BIC;
27456 value = inverted;
27457 break;
27458
27459 case OPCODE_BIC:
27460 new_inst = OPCODE_AND;
27461 value = inverted;
27462 break;
27463
27464 case OPCODE_ADC: /* ADC <-> SBC */
27465 new_inst = OPCODE_SBC;
27466 value = inverted;
27467 break;
27468
27469 case OPCODE_SBC:
27470 new_inst = OPCODE_ADC;
27471 value = inverted;
27472 break;
27473
27474 /* We cannot do anything. */
27475 default:
27476 return FAIL;
27477 }
27478
27479 if (value == (unsigned) FAIL)
27480 return FAIL;
27481
27482 *instruction &= OPCODE_MASK;
27483 *instruction |= new_inst << DATA_OP_SHIFT;
27484 return value;
27485}
27486
27487/* Like negate_data_op, but for Thumb-2. */
27488
27489static unsigned int
27490thumb32_negate_data_op (offsetT *instruction, unsigned int value)
27491{
27492 int op, new_inst;
27493 int rd;
27494 unsigned int negated, inverted;
27495
27496 negated = encode_thumb32_immediate (-value);
27497 inverted = encode_thumb32_immediate (~value);
27498
27499 rd = (*instruction >> 8) & 0xf;
27500 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
27501 switch (op)
27502 {
27503 /* ADD <-> SUB. Includes CMP <-> CMN. */
27504 case T2_OPCODE_SUB:
27505 new_inst = T2_OPCODE_ADD;
27506 value = negated;
27507 break;
27508
27509 case T2_OPCODE_ADD:
27510 new_inst = T2_OPCODE_SUB;
27511 value = negated;
27512 break;
27513
27514 /* ORR <-> ORN. Includes MOV <-> MVN. */
27515 case T2_OPCODE_ORR:
27516 new_inst = T2_OPCODE_ORN;
27517 value = inverted;
27518 break;
27519
27520 case T2_OPCODE_ORN:
27521 new_inst = T2_OPCODE_ORR;
27522 value = inverted;
27523 break;
27524
27525 /* AND <-> BIC. TST has no inverted equivalent. */
27526 case T2_OPCODE_AND:
27527 new_inst = T2_OPCODE_BIC;
27528 if (rd == 15)
27529 value = FAIL;
27530 else
27531 value = inverted;
27532 break;
27533
27534 case T2_OPCODE_BIC:
27535 new_inst = T2_OPCODE_AND;
27536 value = inverted;
27537 break;
27538
27539 /* ADC <-> SBC */
27540 case T2_OPCODE_ADC:
27541 new_inst = T2_OPCODE_SBC;
27542 value = inverted;
27543 break;
27544
27545 case T2_OPCODE_SBC:
27546 new_inst = T2_OPCODE_ADC;
27547 value = inverted;
27548 break;
27549
27550 /* We cannot do anything. */
27551 default:
27552 return FAIL;
27553 }
27554
27555 if (value == (unsigned int)FAIL)
27556 return FAIL;
27557
27558 *instruction &= T2_OPCODE_MASK;
27559 *instruction |= new_inst << T2_DATA_OP_SHIFT;
27560 return value;
27561}
27562
27563/* Read a 32-bit thumb instruction from buf. */
27564
27565static unsigned long
27566get_thumb32_insn (char * buf)
27567{
27568 unsigned long insn;
27569 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
27570 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
27571
27572 return insn;
27573}
27574
27575/* We usually want to set the low bit on the address of thumb function
27576 symbols. In particular .word foo - . should have the low bit set.
27577 Generic code tries to fold the difference of two symbols to
27578 a constant. Prevent this and force a relocation when the first symbols
27579 is a thumb function. */
27580
27581bfd_boolean
27582arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
27583{
27584 if (op == O_subtract
27585 && l->X_op == O_symbol
27586 && r->X_op == O_symbol
27587 && THUMB_IS_FUNC (l->X_add_symbol))
27588 {
27589 l->X_op = O_subtract;
27590 l->X_op_symbol = r->X_add_symbol;
27591 l->X_add_number -= r->X_add_number;
27592 return TRUE;
27593 }
27594
27595 /* Process as normal. */
27596 return FALSE;
27597}
27598
27599/* Encode Thumb2 unconditional branches and calls. The encoding
27600 for the 2 are identical for the immediate values. */
27601
27602static void
27603encode_thumb2_b_bl_offset (char * buf, offsetT value)
27604{
27605#define T2I1I2MASK ((1 << 13) | (1 << 11))
27606 offsetT newval;
27607 offsetT newval2;
27608 addressT S, I1, I2, lo, hi;
27609
27610 S = (value >> 24) & 0x01;
27611 I1 = (value >> 23) & 0x01;
27612 I2 = (value >> 22) & 0x01;
27613 hi = (value >> 12) & 0x3ff;
27614 lo = (value >> 1) & 0x7ff;
27615 newval = md_chars_to_number (buf, THUMB_SIZE);
27616 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
27617 newval |= (S << 10) | hi;
27618 newval2 &= ~T2I1I2MASK;
27619 newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
27620 md_number_to_chars (buf, newval, THUMB_SIZE);
27621 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
27622}
27623
27624void
27625md_apply_fix (fixS * fixP,
27626 valueT * valP,
27627 segT seg)
27628{
27629 offsetT value = * valP;
27630 offsetT newval;
27631 unsigned int newimm;
27632 unsigned long temp;
27633 int sign;
27634 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
27635
27636 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
27637
27638 /* Note whether this will delete the relocation. */
27639
27640 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
27641 fixP->fx_done = 1;
27642
27643 /* On a 64-bit host, silently truncate 'value' to 32 bits for
27644 consistency with the behaviour on 32-bit hosts. Remember value
27645 for emit_reloc. */
27646 value &= 0xffffffff;
27647 value ^= 0x80000000;
27648 value -= 0x80000000;
27649
27650 *valP = value;
27651 fixP->fx_addnumber = value;
27652
27653 /* Same treatment for fixP->fx_offset. */
27654 fixP->fx_offset &= 0xffffffff;
27655 fixP->fx_offset ^= 0x80000000;
27656 fixP->fx_offset -= 0x80000000;
27657
27658 switch (fixP->fx_r_type)
27659 {
27660 case BFD_RELOC_NONE:
27661 /* This will need to go in the object file. */
27662 fixP->fx_done = 0;
27663 break;
27664
27665 case BFD_RELOC_ARM_IMMEDIATE:
27666 /* We claim that this fixup has been processed here,
27667 even if in fact we generate an error because we do
27668 not have a reloc for it, so tc_gen_reloc will reject it. */
27669 fixP->fx_done = 1;
27670
27671 if (fixP->fx_addsy)
27672 {
27673 const char *msg = 0;
27674
27675 if (! S_IS_DEFINED (fixP->fx_addsy))
27676 msg = _("undefined symbol %s used as an immediate value");
27677 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
27678 msg = _("symbol %s is in a different section");
27679 else if (S_IS_WEAK (fixP->fx_addsy))
27680 msg = _("symbol %s is weak and may be overridden later");
27681
27682 if (msg)
27683 {
27684 as_bad_where (fixP->fx_file, fixP->fx_line,
27685 msg, S_GET_NAME (fixP->fx_addsy));
27686 break;
27687 }
27688 }
27689
27690 temp = md_chars_to_number (buf, INSN_SIZE);
27691
27692 /* If the offset is negative, we should use encoding A2 for ADR. */
27693 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
27694 newimm = negate_data_op (&temp, value);
27695 else
27696 {
27697 newimm = encode_arm_immediate (value);
27698
27699 /* If the instruction will fail, see if we can fix things up by
27700 changing the opcode. */
27701 if (newimm == (unsigned int) FAIL)
27702 newimm = negate_data_op (&temp, value);
27703 /* MOV accepts both ARM modified immediate (A1 encoding) and
27704 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
27705 When disassembling, MOV is preferred when there is no encoding
27706 overlap. */
27707 if (newimm == (unsigned int) FAIL
27708 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
27709 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
27710 && !((temp >> SBIT_SHIFT) & 0x1)
27711 && value >= 0 && value <= 0xffff)
27712 {
27713 /* Clear bits[23:20] to change encoding from A1 to A2. */
27714 temp &= 0xff0fffff;
27715 /* Encoding high 4bits imm. Code below will encode the remaining
27716 low 12bits. */
27717 temp |= (value & 0x0000f000) << 4;
27718 newimm = value & 0x00000fff;
27719 }
27720 }
27721
27722 if (newimm == (unsigned int) FAIL)
27723 {
27724 as_bad_where (fixP->fx_file, fixP->fx_line,
27725 _("invalid constant (%lx) after fixup"),
27726 (unsigned long) value);
27727 break;
27728 }
27729
27730 newimm |= (temp & 0xfffff000);
27731 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
27732 break;
27733
27734 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
27735 {
27736 unsigned int highpart = 0;
27737 unsigned int newinsn = 0xe1a00000; /* nop. */
27738
27739 if (fixP->fx_addsy)
27740 {
27741 const char *msg = 0;
27742
27743 if (! S_IS_DEFINED (fixP->fx_addsy))
27744 msg = _("undefined symbol %s used as an immediate value");
27745 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
27746 msg = _("symbol %s is in a different section");
27747 else if (S_IS_WEAK (fixP->fx_addsy))
27748 msg = _("symbol %s is weak and may be overridden later");
27749
27750 if (msg)
27751 {
27752 as_bad_where (fixP->fx_file, fixP->fx_line,
27753 msg, S_GET_NAME (fixP->fx_addsy));
27754 break;
27755 }
27756 }
27757
27758 newimm = encode_arm_immediate (value);
27759 temp = md_chars_to_number (buf, INSN_SIZE);
27760
27761 /* If the instruction will fail, see if we can fix things up by
27762 changing the opcode. */
27763 if (newimm == (unsigned int) FAIL
27764 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
27765 {
27766 /* No ? OK - try using two ADD instructions to generate
27767 the value. */
27768 newimm = validate_immediate_twopart (value, & highpart);
27769
27770 /* Yes - then make sure that the second instruction is
27771 also an add. */
27772 if (newimm != (unsigned int) FAIL)
27773 newinsn = temp;
27774 /* Still No ? Try using a negated value. */
27775 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
27776 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
27777 /* Otherwise - give up. */
27778 else
27779 {
27780 as_bad_where (fixP->fx_file, fixP->fx_line,
27781 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
27782 (long) value);
27783 break;
27784 }
27785
27786 /* Replace the first operand in the 2nd instruction (which
27787 is the PC) with the destination register. We have
27788 already added in the PC in the first instruction and we
27789 do not want to do it again. */
27790 newinsn &= ~ 0xf0000;
27791 newinsn |= ((newinsn & 0x0f000) << 4);
27792 }
27793
27794 newimm |= (temp & 0xfffff000);
27795 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
27796
27797 highpart |= (newinsn & 0xfffff000);
27798 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
27799 }
27800 break;
27801
27802 case BFD_RELOC_ARM_OFFSET_IMM:
27803 if (!fixP->fx_done && seg->use_rela_p)
27804 value = 0;
27805 /* Fall through. */
27806
27807 case BFD_RELOC_ARM_LITERAL:
27808 sign = value > 0;
27809
27810 if (value < 0)
27811 value = - value;
27812
27813 if (validate_offset_imm (value, 0) == FAIL)
27814 {
27815 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
27816 as_bad_where (fixP->fx_file, fixP->fx_line,
27817 _("invalid literal constant: pool needs to be closer"));
27818 else
27819 as_bad_where (fixP->fx_file, fixP->fx_line,
27820 _("bad immediate value for offset (%ld)"),
27821 (long) value);
27822 break;
27823 }
27824
27825 newval = md_chars_to_number (buf, INSN_SIZE);
27826 if (value == 0)
27827 newval &= 0xfffff000;
27828 else
27829 {
27830 newval &= 0xff7ff000;
27831 newval |= value | (sign ? INDEX_UP : 0);
27832 }
27833 md_number_to_chars (buf, newval, INSN_SIZE);
27834 break;
27835
27836 case BFD_RELOC_ARM_OFFSET_IMM8:
27837 case BFD_RELOC_ARM_HWLITERAL:
27838 sign = value > 0;
27839
27840 if (value < 0)
27841 value = - value;
27842
27843 if (validate_offset_imm (value, 1) == FAIL)
27844 {
27845 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
27846 as_bad_where (fixP->fx_file, fixP->fx_line,
27847 _("invalid literal constant: pool needs to be closer"));
27848 else
27849 as_bad_where (fixP->fx_file, fixP->fx_line,
27850 _("bad immediate value for 8-bit offset (%ld)"),
27851 (long) value);
27852 break;
27853 }
27854
27855 newval = md_chars_to_number (buf, INSN_SIZE);
27856 if (value == 0)
27857 newval &= 0xfffff0f0;
27858 else
27859 {
27860 newval &= 0xff7ff0f0;
27861 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
27862 }
27863 md_number_to_chars (buf, newval, INSN_SIZE);
27864 break;
27865
27866 case BFD_RELOC_ARM_T32_OFFSET_U8:
27867 if (value < 0 || value > 1020 || value % 4 != 0)
27868 as_bad_where (fixP->fx_file, fixP->fx_line,
27869 _("bad immediate value for offset (%ld)"), (long) value);
27870 value /= 4;
27871
27872 newval = md_chars_to_number (buf+2, THUMB_SIZE);
27873 newval |= value;
27874 md_number_to_chars (buf+2, newval, THUMB_SIZE);
27875 break;
27876
27877 case BFD_RELOC_ARM_T32_OFFSET_IMM:
27878 /* This is a complicated relocation used for all varieties of Thumb32
27879 load/store instruction with immediate offset:
27880
27881 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
27882 *4, optional writeback(W)
27883 (doubleword load/store)
27884
27885 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
27886 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
27887 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
27888 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
27889 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
27890
27891 Uppercase letters indicate bits that are already encoded at
27892 this point. Lowercase letters are our problem. For the
27893 second block of instructions, the secondary opcode nybble
27894 (bits 8..11) is present, and bit 23 is zero, even if this is
27895 a PC-relative operation. */
27896 newval = md_chars_to_number (buf, THUMB_SIZE);
27897 newval <<= 16;
27898 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
27899
27900 if ((newval & 0xf0000000) == 0xe0000000)
27901 {
27902 /* Doubleword load/store: 8-bit offset, scaled by 4. */
27903 if (value >= 0)
27904 newval |= (1 << 23);
27905 else
27906 value = -value;
27907 if (value % 4 != 0)
27908 {
27909 as_bad_where (fixP->fx_file, fixP->fx_line,
27910 _("offset not a multiple of 4"));
27911 break;
27912 }
27913 value /= 4;
27914 if (value > 0xff)
27915 {
27916 as_bad_where (fixP->fx_file, fixP->fx_line,
27917 _("offset out of range"));
27918 break;
27919 }
27920 newval &= ~0xff;
27921 }
27922 else if ((newval & 0x000f0000) == 0x000f0000)
27923 {
27924 /* PC-relative, 12-bit offset. */
27925 if (value >= 0)
27926 newval |= (1 << 23);
27927 else
27928 value = -value;
27929 if (value > 0xfff)
27930 {
27931 as_bad_where (fixP->fx_file, fixP->fx_line,
27932 _("offset out of range"));
27933 break;
27934 }
27935 newval &= ~0xfff;
27936 }
27937 else if ((newval & 0x00000100) == 0x00000100)
27938 {
27939 /* Writeback: 8-bit, +/- offset. */
27940 if (value >= 0)
27941 newval |= (1 << 9);
27942 else
27943 value = -value;
27944 if (value > 0xff)
27945 {
27946 as_bad_where (fixP->fx_file, fixP->fx_line,
27947 _("offset out of range"));
27948 break;
27949 }
27950 newval &= ~0xff;
27951 }
27952 else if ((newval & 0x00000f00) == 0x00000e00)
27953 {
27954 /* T-instruction: positive 8-bit offset. */
27955 if (value < 0 || value > 0xff)
27956 {
27957 as_bad_where (fixP->fx_file, fixP->fx_line,
27958 _("offset out of range"));
27959 break;
27960 }
27961 newval &= ~0xff;
27962 newval |= value;
27963 }
27964 else
27965 {
27966 /* Positive 12-bit or negative 8-bit offset. */
27967 int limit;
27968 if (value >= 0)
27969 {
27970 newval |= (1 << 23);
27971 limit = 0xfff;
27972 }
27973 else
27974 {
27975 value = -value;
27976 limit = 0xff;
27977 }
27978 if (value > limit)
27979 {
27980 as_bad_where (fixP->fx_file, fixP->fx_line,
27981 _("offset out of range"));
27982 break;
27983 }
27984 newval &= ~limit;
27985 }
27986
27987 newval |= value;
27988 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
27989 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
27990 break;
27991
27992 case BFD_RELOC_ARM_SHIFT_IMM:
27993 newval = md_chars_to_number (buf, INSN_SIZE);
27994 if (((unsigned long) value) > 32
27995 || (value == 32
27996 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
27997 {
27998 as_bad_where (fixP->fx_file, fixP->fx_line,
27999 _("shift expression is too large"));
28000 break;
28001 }
28002
28003 if (value == 0)
28004 /* Shifts of zero must be done as lsl. */
28005 newval &= ~0x60;
28006 else if (value == 32)
28007 value = 0;
28008 newval &= 0xfffff07f;
28009 newval |= (value & 0x1f) << 7;
28010 md_number_to_chars (buf, newval, INSN_SIZE);
28011 break;
28012
28013 case BFD_RELOC_ARM_T32_IMMEDIATE:
28014 case BFD_RELOC_ARM_T32_ADD_IMM:
28015 case BFD_RELOC_ARM_T32_IMM12:
28016 case BFD_RELOC_ARM_T32_ADD_PC12:
28017 /* We claim that this fixup has been processed here,
28018 even if in fact we generate an error because we do
28019 not have a reloc for it, so tc_gen_reloc will reject it. */
28020 fixP->fx_done = 1;
28021
28022 if (fixP->fx_addsy
28023 && ! S_IS_DEFINED (fixP->fx_addsy))
28024 {
28025 as_bad_where (fixP->fx_file, fixP->fx_line,
28026 _("undefined symbol %s used as an immediate value"),
28027 S_GET_NAME (fixP->fx_addsy));
28028 break;
28029 }
28030
28031 newval = md_chars_to_number (buf, THUMB_SIZE);
28032 newval <<= 16;
28033 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
28034
28035 newimm = FAIL;
28036 if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
28037 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
28038 Thumb2 modified immediate encoding (T2). */
28039 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
28040 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
28041 {
28042 newimm = encode_thumb32_immediate (value);
28043 if (newimm == (unsigned int) FAIL)
28044 newimm = thumb32_negate_data_op (&newval, value);
28045 }
28046 if (newimm == (unsigned int) FAIL)
28047 {
28048 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
28049 {
28050 /* Turn add/sum into addw/subw. */
28051 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
28052 newval = (newval & 0xfeffffff) | 0x02000000;
28053 /* No flat 12-bit imm encoding for addsw/subsw. */
28054 if ((newval & 0x00100000) == 0)
28055 {
28056 /* 12 bit immediate for addw/subw. */
28057 if (value < 0)
28058 {
28059 value = -value;
28060 newval ^= 0x00a00000;
28061 }
28062 if (value > 0xfff)
28063 newimm = (unsigned int) FAIL;
28064 else
28065 newimm = value;
28066 }
28067 }
28068 else
28069 {
28070 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
28071 UINT16 (T3 encoding), MOVW only accepts UINT16. When
28072 disassembling, MOV is preferred when there is no encoding
28073 overlap. */
28074 if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
28075 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
28076 but with the Rn field [19:16] set to 1111. */
28077 && (((newval >> 16) & 0xf) == 0xf)
28078 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
28079 && !((newval >> T2_SBIT_SHIFT) & 0x1)
28080 && value >= 0 && value <= 0xffff)
28081 {
28082 /* Toggle bit[25] to change encoding from T2 to T3. */
28083 newval ^= 1 << 25;
28084 /* Clear bits[19:16]. */
28085 newval &= 0xfff0ffff;
28086 /* Encoding high 4bits imm. Code below will encode the
28087 remaining low 12bits. */
28088 newval |= (value & 0x0000f000) << 4;
28089 newimm = value & 0x00000fff;
28090 }
28091 }
28092 }
28093
28094 if (newimm == (unsigned int)FAIL)
28095 {
28096 as_bad_where (fixP->fx_file, fixP->fx_line,
28097 _("invalid constant (%lx) after fixup"),
28098 (unsigned long) value);
28099 break;
28100 }
28101
28102 newval |= (newimm & 0x800) << 15;
28103 newval |= (newimm & 0x700) << 4;
28104 newval |= (newimm & 0x0ff);
28105
28106 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
28107 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
28108 break;
28109
28110 case BFD_RELOC_ARM_SMC:
28111 if (((unsigned long) value) > 0xf)
28112 as_bad_where (fixP->fx_file, fixP->fx_line,
28113 _("invalid smc expression"));
28114
28115 newval = md_chars_to_number (buf, INSN_SIZE);
28116 newval |= (value & 0xf);
28117 md_number_to_chars (buf, newval, INSN_SIZE);
28118 break;
28119
28120 case BFD_RELOC_ARM_HVC:
28121 if (((unsigned long) value) > 0xffff)
28122 as_bad_where (fixP->fx_file, fixP->fx_line,
28123 _("invalid hvc expression"));
28124 newval = md_chars_to_number (buf, INSN_SIZE);
28125 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
28126 md_number_to_chars (buf, newval, INSN_SIZE);
28127 break;
28128
28129 case BFD_RELOC_ARM_SWI:
28130 if (fixP->tc_fix_data != 0)
28131 {
28132 if (((unsigned long) value) > 0xff)
28133 as_bad_where (fixP->fx_file, fixP->fx_line,
28134 _("invalid swi expression"));
28135 newval = md_chars_to_number (buf, THUMB_SIZE);
28136 newval |= value;
28137 md_number_to_chars (buf, newval, THUMB_SIZE);
28138 }
28139 else
28140 {
28141 if (((unsigned long) value) > 0x00ffffff)
28142 as_bad_where (fixP->fx_file, fixP->fx_line,
28143 _("invalid swi expression"));
28144 newval = md_chars_to_number (buf, INSN_SIZE);
28145 newval |= value;
28146 md_number_to_chars (buf, newval, INSN_SIZE);
28147 }
28148 break;
28149
28150 case BFD_RELOC_ARM_MULTI:
28151 if (((unsigned long) value) > 0xffff)
28152 as_bad_where (fixP->fx_file, fixP->fx_line,
28153 _("invalid expression in load/store multiple"));
28154 newval = value | md_chars_to_number (buf, INSN_SIZE);
28155 md_number_to_chars (buf, newval, INSN_SIZE);
28156 break;
28157
28158#ifdef OBJ_ELF
28159 case BFD_RELOC_ARM_PCREL_CALL:
28160
28161 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28162 && fixP->fx_addsy
28163 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28164 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28165 && THUMB_IS_FUNC (fixP->fx_addsy))
28166 /* Flip the bl to blx. This is a simple flip
28167 bit here because we generate PCREL_CALL for
28168 unconditional bls. */
28169 {
28170 newval = md_chars_to_number (buf, INSN_SIZE);
28171 newval = newval | 0x10000000;
28172 md_number_to_chars (buf, newval, INSN_SIZE);
28173 temp = 1;
28174 fixP->fx_done = 1;
28175 }
28176 else
28177 temp = 3;
28178 goto arm_branch_common;
28179
28180 case BFD_RELOC_ARM_PCREL_JUMP:
28181 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28182 && fixP->fx_addsy
28183 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28184 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28185 && THUMB_IS_FUNC (fixP->fx_addsy))
28186 {
28187 /* This would map to a bl<cond>, b<cond>,
28188 b<always> to a Thumb function. We
28189 need to force a relocation for this particular
28190 case. */
28191 newval = md_chars_to_number (buf, INSN_SIZE);
28192 fixP->fx_done = 0;
28193 }
28194 /* Fall through. */
28195
28196 case BFD_RELOC_ARM_PLT32:
28197#endif
28198 case BFD_RELOC_ARM_PCREL_BRANCH:
28199 temp = 3;
28200 goto arm_branch_common;
28201
28202 case BFD_RELOC_ARM_PCREL_BLX:
28203
28204 temp = 1;
28205 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28206 && fixP->fx_addsy
28207 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28208 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28209 && ARM_IS_FUNC (fixP->fx_addsy))
28210 {
28211 /* Flip the blx to a bl and warn. */
28212 const char *name = S_GET_NAME (fixP->fx_addsy);
28213 newval = 0xeb000000;
28214 as_warn_where (fixP->fx_file, fixP->fx_line,
28215 _("blx to '%s' an ARM ISA state function changed to bl"),
28216 name);
28217 md_number_to_chars (buf, newval, INSN_SIZE);
28218 temp = 3;
28219 fixP->fx_done = 1;
28220 }
28221
28222#ifdef OBJ_ELF
28223 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
28224 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
28225#endif
28226
28227 arm_branch_common:
28228 /* We are going to store value (shifted right by two) in the
28229 instruction, in a 24 bit, signed field. Bits 26 through 32 either
28230 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
28231 also be clear. */
28232 if (value & temp)
28233 as_bad_where (fixP->fx_file, fixP->fx_line,
28234 _("misaligned branch destination"));
28235 if ((value & (offsetT)0xfe000000) != (offsetT)0
28236 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
28237 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28238
28239 if (fixP->fx_done || !seg->use_rela_p)
28240 {
28241 newval = md_chars_to_number (buf, INSN_SIZE);
28242 newval |= (value >> 2) & 0x00ffffff;
28243 /* Set the H bit on BLX instructions. */
28244 if (temp == 1)
28245 {
28246 if (value & 2)
28247 newval |= 0x01000000;
28248 else
28249 newval &= ~0x01000000;
28250 }
28251 md_number_to_chars (buf, newval, INSN_SIZE);
28252 }
28253 break;
28254
28255 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
28256 /* CBZ can only branch forward. */
28257
28258 /* Attempts to use CBZ to branch to the next instruction
28259 (which, strictly speaking, are prohibited) will be turned into
28260 no-ops.
28261
28262 FIXME: It may be better to remove the instruction completely and
28263 perform relaxation. */
28264 if (value == -2)
28265 {
28266 newval = md_chars_to_number (buf, THUMB_SIZE);
28267 newval = 0xbf00; /* NOP encoding T1 */
28268 md_number_to_chars (buf, newval, THUMB_SIZE);
28269 }
28270 else
28271 {
28272 if (value & ~0x7e)
28273 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28274
28275 if (fixP->fx_done || !seg->use_rela_p)
28276 {
28277 newval = md_chars_to_number (buf, THUMB_SIZE);
28278 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
28279 md_number_to_chars (buf, newval, THUMB_SIZE);
28280 }
28281 }
28282 break;
28283
28284 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
28285 if (out_of_range_p (value, 8))
28286 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28287
28288 if (fixP->fx_done || !seg->use_rela_p)
28289 {
28290 newval = md_chars_to_number (buf, THUMB_SIZE);
28291 newval |= (value & 0x1ff) >> 1;
28292 md_number_to_chars (buf, newval, THUMB_SIZE);
28293 }
28294 break;
28295
28296 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
28297 if (out_of_range_p (value, 11))
28298 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28299
28300 if (fixP->fx_done || !seg->use_rela_p)
28301 {
28302 newval = md_chars_to_number (buf, THUMB_SIZE);
28303 newval |= (value & 0xfff) >> 1;
28304 md_number_to_chars (buf, newval, THUMB_SIZE);
28305 }
28306 break;
28307
28308 /* This relocation is misnamed, it should be BRANCH21. */
28309 case BFD_RELOC_THUMB_PCREL_BRANCH20:
28310 if (fixP->fx_addsy
28311 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28312 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28313 && ARM_IS_FUNC (fixP->fx_addsy)
28314 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
28315 {
28316 /* Force a relocation for a branch 20 bits wide. */
28317 fixP->fx_done = 0;
28318 }
28319 if (out_of_range_p (value, 20))
28320 as_bad_where (fixP->fx_file, fixP->fx_line,
28321 _("conditional branch out of range"));
28322
28323 if (fixP->fx_done || !seg->use_rela_p)
28324 {
28325 offsetT newval2;
28326 addressT S, J1, J2, lo, hi;
28327
28328 S = (value & 0x00100000) >> 20;
28329 J2 = (value & 0x00080000) >> 19;
28330 J1 = (value & 0x00040000) >> 18;
28331 hi = (value & 0x0003f000) >> 12;
28332 lo = (value & 0x00000ffe) >> 1;
28333
28334 newval = md_chars_to_number (buf, THUMB_SIZE);
28335 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
28336 newval |= (S << 10) | hi;
28337 newval2 |= (J1 << 13) | (J2 << 11) | lo;
28338 md_number_to_chars (buf, newval, THUMB_SIZE);
28339 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
28340 }
28341 break;
28342
28343 case BFD_RELOC_THUMB_PCREL_BLX:
28344 /* If there is a blx from a thumb state function to
28345 another thumb function flip this to a bl and warn
28346 about it. */
28347
28348 if (fixP->fx_addsy
28349 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28350 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28351 && THUMB_IS_FUNC (fixP->fx_addsy))
28352 {
28353 const char *name = S_GET_NAME (fixP->fx_addsy);
28354 as_warn_where (fixP->fx_file, fixP->fx_line,
28355 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
28356 name);
28357 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
28358 newval = newval | 0x1000;
28359 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
28360 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
28361 fixP->fx_done = 1;
28362 }
28363
28364
28365 goto thumb_bl_common;
28366
28367 case BFD_RELOC_THUMB_PCREL_BRANCH23:
28368 /* A bl from Thumb state ISA to an internal ARM state function
28369 is converted to a blx. */
28370 if (fixP->fx_addsy
28371 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28372 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28373 && ARM_IS_FUNC (fixP->fx_addsy)
28374 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
28375 {
28376 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
28377 newval = newval & ~0x1000;
28378 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
28379 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
28380 fixP->fx_done = 1;
28381 }
28382
28383 thumb_bl_common:
28384
28385 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
28386 /* For a BLX instruction, make sure that the relocation is rounded up
28387 to a word boundary. This follows the semantics of the instruction
28388 which specifies that bit 1 of the target address will come from bit
28389 1 of the base address. */
28390 value = (value + 3) & ~ 3;
28391
28392#ifdef OBJ_ELF
28393 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
28394 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
28395 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
28396#endif
28397
28398 if (out_of_range_p (value, 22))
28399 {
28400 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
28401 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28402 else if (out_of_range_p (value, 24))
28403 as_bad_where (fixP->fx_file, fixP->fx_line,
28404 _("Thumb2 branch out of range"));
28405 }
28406
28407 if (fixP->fx_done || !seg->use_rela_p)
28408 encode_thumb2_b_bl_offset (buf, value);
28409
28410 break;
28411
28412 case BFD_RELOC_THUMB_PCREL_BRANCH25:
28413 if (out_of_range_p (value, 24))
28414 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28415
28416 if (fixP->fx_done || !seg->use_rela_p)
28417 encode_thumb2_b_bl_offset (buf, value);
28418
28419 break;
28420
28421 case BFD_RELOC_8:
28422 if (fixP->fx_done || !seg->use_rela_p)
28423 *buf = value;
28424 break;
28425
28426 case BFD_RELOC_16:
28427 if (fixP->fx_done || !seg->use_rela_p)
28428 md_number_to_chars (buf, value, 2);
28429 break;
28430
28431#ifdef OBJ_ELF
28432 case BFD_RELOC_ARM_TLS_CALL:
28433 case BFD_RELOC_ARM_THM_TLS_CALL:
28434 case BFD_RELOC_ARM_TLS_DESCSEQ:
28435 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
28436 case BFD_RELOC_ARM_TLS_GOTDESC:
28437 case BFD_RELOC_ARM_TLS_GD32:
28438 case BFD_RELOC_ARM_TLS_LE32:
28439 case BFD_RELOC_ARM_TLS_IE32:
28440 case BFD_RELOC_ARM_TLS_LDM32:
28441 case BFD_RELOC_ARM_TLS_LDO32:
28442 S_SET_THREAD_LOCAL (fixP->fx_addsy);
28443 break;
28444
28445 /* Same handling as above, but with the arm_fdpic guard. */
28446 case BFD_RELOC_ARM_TLS_GD32_FDPIC:
28447 case BFD_RELOC_ARM_TLS_IE32_FDPIC:
28448 case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
28449 if (arm_fdpic)
28450 {
28451 S_SET_THREAD_LOCAL (fixP->fx_addsy);
28452 }
28453 else
28454 {
28455 as_bad_where (fixP->fx_file, fixP->fx_line,
28456 _("Relocation supported only in FDPIC mode"));
28457 }
28458 break;
28459
28460 case BFD_RELOC_ARM_GOT32:
28461 case BFD_RELOC_ARM_GOTOFF:
28462 break;
28463
28464 case BFD_RELOC_ARM_GOT_PREL:
28465 if (fixP->fx_done || !seg->use_rela_p)
28466 md_number_to_chars (buf, value, 4);
28467 break;
28468
28469 case BFD_RELOC_ARM_TARGET2:
28470 /* TARGET2 is not partial-inplace, so we need to write the
28471 addend here for REL targets, because it won't be written out
28472 during reloc processing later. */
28473 if (fixP->fx_done || !seg->use_rela_p)
28474 md_number_to_chars (buf, fixP->fx_offset, 4);
28475 break;
28476
28477 /* Relocations for FDPIC. */
28478 case BFD_RELOC_ARM_GOTFUNCDESC:
28479 case BFD_RELOC_ARM_GOTOFFFUNCDESC:
28480 case BFD_RELOC_ARM_FUNCDESC:
28481 if (arm_fdpic)
28482 {
28483 if (fixP->fx_done || !seg->use_rela_p)
28484 md_number_to_chars (buf, 0, 4);
28485 }
28486 else
28487 {
28488 as_bad_where (fixP->fx_file, fixP->fx_line,
28489 _("Relocation supported only in FDPIC mode"));
28490 }
28491 break;
28492#endif
28493
28494 case BFD_RELOC_RVA:
28495 case BFD_RELOC_32:
28496 case BFD_RELOC_ARM_TARGET1:
28497 case BFD_RELOC_ARM_ROSEGREL32:
28498 case BFD_RELOC_ARM_SBREL32:
28499 case BFD_RELOC_32_PCREL:
28500#ifdef TE_PE
28501 case BFD_RELOC_32_SECREL:
28502#endif
28503 if (fixP->fx_done || !seg->use_rela_p)
28504#ifdef TE_WINCE
28505 /* For WinCE we only do this for pcrel fixups. */
28506 if (fixP->fx_done || fixP->fx_pcrel)
28507#endif
28508 md_number_to_chars (buf, value, 4);
28509 break;
28510
28511#ifdef OBJ_ELF
28512 case BFD_RELOC_ARM_PREL31:
28513 if (fixP->fx_done || !seg->use_rela_p)
28514 {
28515 newval = md_chars_to_number (buf, 4) & 0x80000000;
28516 if ((value ^ (value >> 1)) & 0x40000000)
28517 {
28518 as_bad_where (fixP->fx_file, fixP->fx_line,
28519 _("rel31 relocation overflow"));
28520 }
28521 newval |= value & 0x7fffffff;
28522 md_number_to_chars (buf, newval, 4);
28523 }
28524 break;
28525#endif
28526
28527 case BFD_RELOC_ARM_CP_OFF_IMM:
28528 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
28529 case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM:
28530 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
28531 newval = md_chars_to_number (buf, INSN_SIZE);
28532 else
28533 newval = get_thumb32_insn (buf);
28534 if ((newval & 0x0f200f00) == 0x0d000900)
28535 {
28536 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
28537 has permitted values that are multiples of 2, in the range 0
28538 to 510. */
28539 if (value < -510 || value > 510 || (value & 1))
28540 as_bad_where (fixP->fx_file, fixP->fx_line,
28541 _("co-processor offset out of range"));
28542 }
28543 else if ((newval & 0xfe001f80) == 0xec000f80)
28544 {
28545 if (value < -511 || value > 512 || (value & 3))
28546 as_bad_where (fixP->fx_file, fixP->fx_line,
28547 _("co-processor offset out of range"));
28548 }
28549 else if (value < -1023 || value > 1023 || (value & 3))
28550 as_bad_where (fixP->fx_file, fixP->fx_line,
28551 _("co-processor offset out of range"));
28552 cp_off_common:
28553 sign = value > 0;
28554 if (value < 0)
28555 value = -value;
28556 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
28557 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
28558 newval = md_chars_to_number (buf, INSN_SIZE);
28559 else
28560 newval = get_thumb32_insn (buf);
28561 if (value == 0)
28562 {
28563 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
28564 newval &= 0xffffff80;
28565 else
28566 newval &= 0xffffff00;
28567 }
28568 else
28569 {
28570 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
28571 newval &= 0xff7fff80;
28572 else
28573 newval &= 0xff7fff00;
28574 if ((newval & 0x0f200f00) == 0x0d000900)
28575 {
28576 /* This is a fp16 vstr/vldr.
28577
28578 It requires the immediate offset in the instruction is shifted
28579 left by 1 to be a half-word offset.
28580
28581 Here, left shift by 1 first, and later right shift by 2
28582 should get the right offset. */
28583 value <<= 1;
28584 }
28585 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
28586 }
28587 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
28588 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
28589 md_number_to_chars (buf, newval, INSN_SIZE);
28590 else
28591 put_thumb32_insn (buf, newval);
28592 break;
28593
28594 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
28595 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
28596 if (value < -255 || value > 255)
28597 as_bad_where (fixP->fx_file, fixP->fx_line,
28598 _("co-processor offset out of range"));
28599 value *= 4;
28600 goto cp_off_common;
28601
28602 case BFD_RELOC_ARM_THUMB_OFFSET:
28603 newval = md_chars_to_number (buf, THUMB_SIZE);
28604 /* Exactly what ranges, and where the offset is inserted depends
28605 on the type of instruction, we can establish this from the
28606 top 4 bits. */
28607 switch (newval >> 12)
28608 {
28609 case 4: /* PC load. */
28610 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
28611 forced to zero for these loads; md_pcrel_from has already
28612 compensated for this. */
28613 if (value & 3)
28614 as_bad_where (fixP->fx_file, fixP->fx_line,
28615 _("invalid offset, target not word aligned (0x%08lX)"),
28616 (((unsigned long) fixP->fx_frag->fr_address
28617 + (unsigned long) fixP->fx_where) & ~3)
28618 + (unsigned long) value);
28619
28620 if (value & ~0x3fc)
28621 as_bad_where (fixP->fx_file, fixP->fx_line,
28622 _("invalid offset, value too big (0x%08lX)"),
28623 (long) value);
28624
28625 newval |= value >> 2;
28626 break;
28627
28628 case 9: /* SP load/store. */
28629 if (value & ~0x3fc)
28630 as_bad_where (fixP->fx_file, fixP->fx_line,
28631 _("invalid offset, value too big (0x%08lX)"),
28632 (long) value);
28633 newval |= value >> 2;
28634 break;
28635
28636 case 6: /* Word load/store. */
28637 if (value & ~0x7c)
28638 as_bad_where (fixP->fx_file, fixP->fx_line,
28639 _("invalid offset, value too big (0x%08lX)"),
28640 (long) value);
28641 newval |= value << 4; /* 6 - 2. */
28642 break;
28643
28644 case 7: /* Byte load/store. */
28645 if (value & ~0x1f)
28646 as_bad_where (fixP->fx_file, fixP->fx_line,
28647 _("invalid offset, value too big (0x%08lX)"),
28648 (long) value);
28649 newval |= value << 6;
28650 break;
28651
28652 case 8: /* Halfword load/store. */
28653 if (value & ~0x3e)
28654 as_bad_where (fixP->fx_file, fixP->fx_line,
28655 _("invalid offset, value too big (0x%08lX)"),
28656 (long) value);
28657 newval |= value << 5; /* 6 - 1. */
28658 break;
28659
28660 default:
28661 as_bad_where (fixP->fx_file, fixP->fx_line,
28662 "Unable to process relocation for thumb opcode: %lx",
28663 (unsigned long) newval);
28664 break;
28665 }
28666 md_number_to_chars (buf, newval, THUMB_SIZE);
28667 break;
28668
28669 case BFD_RELOC_ARM_THUMB_ADD:
28670 /* This is a complicated relocation, since we use it for all of
28671 the following immediate relocations:
28672
28673 3bit ADD/SUB
28674 8bit ADD/SUB
28675 9bit ADD/SUB SP word-aligned
28676 10bit ADD PC/SP word-aligned
28677
28678 The type of instruction being processed is encoded in the
28679 instruction field:
28680
28681 0x8000 SUB
28682 0x00F0 Rd
28683 0x000F Rs
28684 */
28685 newval = md_chars_to_number (buf, THUMB_SIZE);
28686 {
28687 int rd = (newval >> 4) & 0xf;
28688 int rs = newval & 0xf;
28689 int subtract = !!(newval & 0x8000);
28690
28691 /* Check for HI regs, only very restricted cases allowed:
28692 Adjusting SP, and using PC or SP to get an address. */
28693 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
28694 || (rs > 7 && rs != REG_SP && rs != REG_PC))
28695 as_bad_where (fixP->fx_file, fixP->fx_line,
28696 _("invalid Hi register with immediate"));
28697
28698 /* If value is negative, choose the opposite instruction. */
28699 if (value < 0)
28700 {
28701 value = -value;
28702 subtract = !subtract;
28703 if (value < 0)
28704 as_bad_where (fixP->fx_file, fixP->fx_line,
28705 _("immediate value out of range"));
28706 }
28707
28708 if (rd == REG_SP)
28709 {
28710 if (value & ~0x1fc)
28711 as_bad_where (fixP->fx_file, fixP->fx_line,
28712 _("invalid immediate for stack address calculation"));
28713 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
28714 newval |= value >> 2;
28715 }
28716 else if (rs == REG_PC || rs == REG_SP)
28717 {
28718 /* PR gas/18541. If the addition is for a defined symbol
28719 within range of an ADR instruction then accept it. */
28720 if (subtract
28721 && value == 4
28722 && fixP->fx_addsy != NULL)
28723 {
28724 subtract = 0;
28725
28726 if (! S_IS_DEFINED (fixP->fx_addsy)
28727 || S_GET_SEGMENT (fixP->fx_addsy) != seg
28728 || S_IS_WEAK (fixP->fx_addsy))
28729 {
28730 as_bad_where (fixP->fx_file, fixP->fx_line,
28731 _("address calculation needs a strongly defined nearby symbol"));
28732 }
28733 else
28734 {
28735 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
28736
28737 /* Round up to the next 4-byte boundary. */
28738 if (v & 3)
28739 v = (v + 3) & ~ 3;
28740 else
28741 v += 4;
28742 v = S_GET_VALUE (fixP->fx_addsy) - v;
28743
28744 if (v & ~0x3fc)
28745 {
28746 as_bad_where (fixP->fx_file, fixP->fx_line,
28747 _("symbol too far away"));
28748 }
28749 else
28750 {
28751 fixP->fx_done = 1;
28752 value = v;
28753 }
28754 }
28755 }
28756
28757 if (subtract || value & ~0x3fc)
28758 as_bad_where (fixP->fx_file, fixP->fx_line,
28759 _("invalid immediate for address calculation (value = 0x%08lX)"),
28760 (unsigned long) (subtract ? - value : value));
28761 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
28762 newval |= rd << 8;
28763 newval |= value >> 2;
28764 }
28765 else if (rs == rd)
28766 {
28767 if (value & ~0xff)
28768 as_bad_where (fixP->fx_file, fixP->fx_line,
28769 _("immediate value out of range"));
28770 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
28771 newval |= (rd << 8) | value;
28772 }
28773 else
28774 {
28775 if (value & ~0x7)
28776 as_bad_where (fixP->fx_file, fixP->fx_line,
28777 _("immediate value out of range"));
28778 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
28779 newval |= rd | (rs << 3) | (value << 6);
28780 }
28781 }
28782 md_number_to_chars (buf, newval, THUMB_SIZE);
28783 break;
28784
28785 case BFD_RELOC_ARM_THUMB_IMM:
28786 newval = md_chars_to_number (buf, THUMB_SIZE);
28787 if (value < 0 || value > 255)
28788 as_bad_where (fixP->fx_file, fixP->fx_line,
28789 _("invalid immediate: %ld is out of range"),
28790 (long) value);
28791 newval |= value;
28792 md_number_to_chars (buf, newval, THUMB_SIZE);
28793 break;
28794
28795 case BFD_RELOC_ARM_THUMB_SHIFT:
28796 /* 5bit shift value (0..32). LSL cannot take 32. */
28797 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
28798 temp = newval & 0xf800;
28799 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
28800 as_bad_where (fixP->fx_file, fixP->fx_line,
28801 _("invalid shift value: %ld"), (long) value);
28802 /* Shifts of zero must be encoded as LSL. */
28803 if (value == 0)
28804 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
28805 /* Shifts of 32 are encoded as zero. */
28806 else if (value == 32)
28807 value = 0;
28808 newval |= value << 6;
28809 md_number_to_chars (buf, newval, THUMB_SIZE);
28810 break;
28811
28812 case BFD_RELOC_VTABLE_INHERIT:
28813 case BFD_RELOC_VTABLE_ENTRY:
28814 fixP->fx_done = 0;
28815 return;
28816
28817 case BFD_RELOC_ARM_MOVW:
28818 case BFD_RELOC_ARM_MOVT:
28819 case BFD_RELOC_ARM_THUMB_MOVW:
28820 case BFD_RELOC_ARM_THUMB_MOVT:
28821 if (fixP->fx_done || !seg->use_rela_p)
28822 {
28823 /* REL format relocations are limited to a 16-bit addend. */
28824 if (!fixP->fx_done)
28825 {
28826 if (value < -0x8000 || value > 0x7fff)
28827 as_bad_where (fixP->fx_file, fixP->fx_line,
28828 _("offset out of range"));
28829 }
28830 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
28831 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
28832 {
28833 value >>= 16;
28834 }
28835
28836 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
28837 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
28838 {
28839 newval = get_thumb32_insn (buf);
28840 newval &= 0xfbf08f00;
28841 newval |= (value & 0xf000) << 4;
28842 newval |= (value & 0x0800) << 15;
28843 newval |= (value & 0x0700) << 4;
28844 newval |= (value & 0x00ff);
28845 put_thumb32_insn (buf, newval);
28846 }
28847 else
28848 {
28849 newval = md_chars_to_number (buf, 4);
28850 newval &= 0xfff0f000;
28851 newval |= value & 0x0fff;
28852 newval |= (value & 0xf000) << 4;
28853 md_number_to_chars (buf, newval, 4);
28854 }
28855 }
28856 return;
28857
28858 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
28859 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
28860 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
28861 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
28862 gas_assert (!fixP->fx_done);
28863 {
28864 bfd_vma insn;
28865 bfd_boolean is_mov;
28866 bfd_vma encoded_addend = value;
28867
28868 /* Check that addend can be encoded in instruction. */
28869 if (!seg->use_rela_p && (value < 0 || value > 255))
28870 as_bad_where (fixP->fx_file, fixP->fx_line,
28871 _("the offset 0x%08lX is not representable"),
28872 (unsigned long) encoded_addend);
28873
28874 /* Extract the instruction. */
28875 insn = md_chars_to_number (buf, THUMB_SIZE);
28876 is_mov = (insn & 0xf800) == 0x2000;
28877
28878 /* Encode insn. */
28879 if (is_mov)
28880 {
28881 if (!seg->use_rela_p)
28882 insn |= encoded_addend;
28883 }
28884 else
28885 {
28886 int rd, rs;
28887
28888 /* Extract the instruction. */
28889 /* Encoding is the following
28890 0x8000 SUB
28891 0x00F0 Rd
28892 0x000F Rs
28893 */
28894 /* The following conditions must be true :
28895 - ADD
28896 - Rd == Rs
28897 - Rd <= 7
28898 */
28899 rd = (insn >> 4) & 0xf;
28900 rs = insn & 0xf;
28901 if ((insn & 0x8000) || (rd != rs) || rd > 7)
28902 as_bad_where (fixP->fx_file, fixP->fx_line,
28903 _("Unable to process relocation for thumb opcode: %lx"),
28904 (unsigned long) insn);
28905
28906 /* Encode as ADD immediate8 thumb 1 code. */
28907 insn = 0x3000 | (rd << 8);
28908
28909 /* Place the encoded addend into the first 8 bits of the
28910 instruction. */
28911 if (!seg->use_rela_p)
28912 insn |= encoded_addend;
28913 }
28914
28915 /* Update the instruction. */
28916 md_number_to_chars (buf, insn, THUMB_SIZE);
28917 }
28918 break;
28919
28920 case BFD_RELOC_ARM_ALU_PC_G0_NC:
28921 case BFD_RELOC_ARM_ALU_PC_G0:
28922 case BFD_RELOC_ARM_ALU_PC_G1_NC:
28923 case BFD_RELOC_ARM_ALU_PC_G1:
28924 case BFD_RELOC_ARM_ALU_PC_G2:
28925 case BFD_RELOC_ARM_ALU_SB_G0_NC:
28926 case BFD_RELOC_ARM_ALU_SB_G0:
28927 case BFD_RELOC_ARM_ALU_SB_G1_NC:
28928 case BFD_RELOC_ARM_ALU_SB_G1:
28929 case BFD_RELOC_ARM_ALU_SB_G2:
28930 gas_assert (!fixP->fx_done);
28931 if (!seg->use_rela_p)
28932 {
28933 bfd_vma insn;
28934 bfd_vma encoded_addend;
28935 bfd_vma addend_abs = llabs (value);
28936
28937 /* Check that the absolute value of the addend can be
28938 expressed as an 8-bit constant plus a rotation. */
28939 encoded_addend = encode_arm_immediate (addend_abs);
28940 if (encoded_addend == (unsigned int) FAIL)
28941 as_bad_where (fixP->fx_file, fixP->fx_line,
28942 _("the offset 0x%08lX is not representable"),
28943 (unsigned long) addend_abs);
28944
28945 /* Extract the instruction. */
28946 insn = md_chars_to_number (buf, INSN_SIZE);
28947
28948 /* If the addend is positive, use an ADD instruction.
28949 Otherwise use a SUB. Take care not to destroy the S bit. */
28950 insn &= 0xff1fffff;
28951 if (value < 0)
28952 insn |= 1 << 22;
28953 else
28954 insn |= 1 << 23;
28955
28956 /* Place the encoded addend into the first 12 bits of the
28957 instruction. */
28958 insn &= 0xfffff000;
28959 insn |= encoded_addend;
28960
28961 /* Update the instruction. */
28962 md_number_to_chars (buf, insn, INSN_SIZE);
28963 }
28964 break;
28965
28966 case BFD_RELOC_ARM_LDR_PC_G0:
28967 case BFD_RELOC_ARM_LDR_PC_G1:
28968 case BFD_RELOC_ARM_LDR_PC_G2:
28969 case BFD_RELOC_ARM_LDR_SB_G0:
28970 case BFD_RELOC_ARM_LDR_SB_G1:
28971 case BFD_RELOC_ARM_LDR_SB_G2:
28972 gas_assert (!fixP->fx_done);
28973 if (!seg->use_rela_p)
28974 {
28975 bfd_vma insn;
28976 bfd_vma addend_abs = llabs (value);
28977
28978 /* Check that the absolute value of the addend can be
28979 encoded in 12 bits. */
28980 if (addend_abs >= 0x1000)
28981 as_bad_where (fixP->fx_file, fixP->fx_line,
28982 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
28983 (unsigned long) addend_abs);
28984
28985 /* Extract the instruction. */
28986 insn = md_chars_to_number (buf, INSN_SIZE);
28987
28988 /* If the addend is negative, clear bit 23 of the instruction.
28989 Otherwise set it. */
28990 if (value < 0)
28991 insn &= ~(1 << 23);
28992 else
28993 insn |= 1 << 23;
28994
28995 /* Place the absolute value of the addend into the first 12 bits
28996 of the instruction. */
28997 insn &= 0xfffff000;
28998 insn |= addend_abs;
28999
29000 /* Update the instruction. */
29001 md_number_to_chars (buf, insn, INSN_SIZE);
29002 }
29003 break;
29004
29005 case BFD_RELOC_ARM_LDRS_PC_G0:
29006 case BFD_RELOC_ARM_LDRS_PC_G1:
29007 case BFD_RELOC_ARM_LDRS_PC_G2:
29008 case BFD_RELOC_ARM_LDRS_SB_G0:
29009 case BFD_RELOC_ARM_LDRS_SB_G1:
29010 case BFD_RELOC_ARM_LDRS_SB_G2:
29011 gas_assert (!fixP->fx_done);
29012 if (!seg->use_rela_p)
29013 {
29014 bfd_vma insn;
29015 bfd_vma addend_abs = llabs (value);
29016
29017 /* Check that the absolute value of the addend can be
29018 encoded in 8 bits. */
29019 if (addend_abs >= 0x100)
29020 as_bad_where (fixP->fx_file, fixP->fx_line,
29021 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
29022 (unsigned long) addend_abs);
29023
29024 /* Extract the instruction. */
29025 insn = md_chars_to_number (buf, INSN_SIZE);
29026
29027 /* If the addend is negative, clear bit 23 of the instruction.
29028 Otherwise set it. */
29029 if (value < 0)
29030 insn &= ~(1 << 23);
29031 else
29032 insn |= 1 << 23;
29033
29034 /* Place the first four bits of the absolute value of the addend
29035 into the first 4 bits of the instruction, and the remaining
29036 four into bits 8 .. 11. */
29037 insn &= 0xfffff0f0;
29038 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
29039
29040 /* Update the instruction. */
29041 md_number_to_chars (buf, insn, INSN_SIZE);
29042 }
29043 break;
29044
29045 case BFD_RELOC_ARM_LDC_PC_G0:
29046 case BFD_RELOC_ARM_LDC_PC_G1:
29047 case BFD_RELOC_ARM_LDC_PC_G2:
29048 case BFD_RELOC_ARM_LDC_SB_G0:
29049 case BFD_RELOC_ARM_LDC_SB_G1:
29050 case BFD_RELOC_ARM_LDC_SB_G2:
29051 gas_assert (!fixP->fx_done);
29052 if (!seg->use_rela_p)
29053 {
29054 bfd_vma insn;
29055 bfd_vma addend_abs = llabs (value);
29056
29057 /* Check that the absolute value of the addend is a multiple of
29058 four and, when divided by four, fits in 8 bits. */
29059 if (addend_abs & 0x3)
29060 as_bad_where (fixP->fx_file, fixP->fx_line,
29061 _("bad offset 0x%08lX (must be word-aligned)"),
29062 (unsigned long) addend_abs);
29063
29064 if ((addend_abs >> 2) > 0xff)
29065 as_bad_where (fixP->fx_file, fixP->fx_line,
29066 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
29067 (unsigned long) addend_abs);
29068
29069 /* Extract the instruction. */
29070 insn = md_chars_to_number (buf, INSN_SIZE);
29071
29072 /* If the addend is negative, clear bit 23 of the instruction.
29073 Otherwise set it. */
29074 if (value < 0)
29075 insn &= ~(1 << 23);
29076 else
29077 insn |= 1 << 23;
29078
29079 /* Place the addend (divided by four) into the first eight
29080 bits of the instruction. */
29081 insn &= 0xfffffff0;
29082 insn |= addend_abs >> 2;
29083
29084 /* Update the instruction. */
29085 md_number_to_chars (buf, insn, INSN_SIZE);
29086 }
29087 break;
29088
29089 case BFD_RELOC_THUMB_PCREL_BRANCH5:
29090 if (fixP->fx_addsy
29091 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29092 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29093 && ARM_IS_FUNC (fixP->fx_addsy)
29094 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29095 {
29096 /* Force a relocation for a branch 5 bits wide. */
29097 fixP->fx_done = 0;
29098 }
29099 if (v8_1_branch_value_check (value, 5, FALSE) == FAIL)
29100 as_bad_where (fixP->fx_file, fixP->fx_line,
29101 BAD_BRANCH_OFF);
29102
29103 if (fixP->fx_done || !seg->use_rela_p)
29104 {
29105 addressT boff = value >> 1;
29106
29107 newval = md_chars_to_number (buf, THUMB_SIZE);
29108 newval |= (boff << 7);
29109 md_number_to_chars (buf, newval, THUMB_SIZE);
29110 }
29111 break;
29112
29113 case BFD_RELOC_THUMB_PCREL_BFCSEL:
29114 if (fixP->fx_addsy
29115 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29116 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29117 && ARM_IS_FUNC (fixP->fx_addsy)
29118 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29119 {
29120 fixP->fx_done = 0;
29121 }
29122 if ((value & ~0x7f) && ((value & ~0x3f) != ~0x3f))
29123 as_bad_where (fixP->fx_file, fixP->fx_line,
29124 _("branch out of range"));
29125
29126 if (fixP->fx_done || !seg->use_rela_p)
29127 {
29128 newval = md_chars_to_number (buf, THUMB_SIZE);
29129
29130 addressT boff = ((newval & 0x0780) >> 7) << 1;
29131 addressT diff = value - boff;
29132
29133 if (diff == 4)
29134 {
29135 newval |= 1 << 1; /* T bit. */
29136 }
29137 else if (diff != 2)
29138 {
29139 as_bad_where (fixP->fx_file, fixP->fx_line,
29140 _("out of range label-relative fixup value"));
29141 }
29142 md_number_to_chars (buf, newval, THUMB_SIZE);
29143 }
29144 break;
29145
29146 case BFD_RELOC_ARM_THUMB_BF17:
29147 if (fixP->fx_addsy
29148 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29149 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29150 && ARM_IS_FUNC (fixP->fx_addsy)
29151 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29152 {
29153 /* Force a relocation for a branch 17 bits wide. */
29154 fixP->fx_done = 0;
29155 }
29156
29157 if (v8_1_branch_value_check (value, 17, TRUE) == FAIL)
29158 as_bad_where (fixP->fx_file, fixP->fx_line,
29159 BAD_BRANCH_OFF);
29160
29161 if (fixP->fx_done || !seg->use_rela_p)
29162 {
29163 offsetT newval2;
29164 addressT immA, immB, immC;
29165
29166 immA = (value & 0x0001f000) >> 12;
29167 immB = (value & 0x00000ffc) >> 2;
29168 immC = (value & 0x00000002) >> 1;
29169
29170 newval = md_chars_to_number (buf, THUMB_SIZE);
29171 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29172 newval |= immA;
29173 newval2 |= (immC << 11) | (immB << 1);
29174 md_number_to_chars (buf, newval, THUMB_SIZE);
29175 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29176 }
29177 break;
29178
29179 case BFD_RELOC_ARM_THUMB_BF19:
29180 if (fixP->fx_addsy
29181 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29182 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29183 && ARM_IS_FUNC (fixP->fx_addsy)
29184 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29185 {
29186 /* Force a relocation for a branch 19 bits wide. */
29187 fixP->fx_done = 0;
29188 }
29189
29190 if (v8_1_branch_value_check (value, 19, TRUE) == FAIL)
29191 as_bad_where (fixP->fx_file, fixP->fx_line,
29192 BAD_BRANCH_OFF);
29193
29194 if (fixP->fx_done || !seg->use_rela_p)
29195 {
29196 offsetT newval2;
29197 addressT immA, immB, immC;
29198
29199 immA = (value & 0x0007f000) >> 12;
29200 immB = (value & 0x00000ffc) >> 2;
29201 immC = (value & 0x00000002) >> 1;
29202
29203 newval = md_chars_to_number (buf, THUMB_SIZE);
29204 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29205 newval |= immA;
29206 newval2 |= (immC << 11) | (immB << 1);
29207 md_number_to_chars (buf, newval, THUMB_SIZE);
29208 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29209 }
29210 break;
29211
29212 case BFD_RELOC_ARM_THUMB_BF13:
29213 if (fixP->fx_addsy
29214 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29215 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29216 && ARM_IS_FUNC (fixP->fx_addsy)
29217 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29218 {
29219 /* Force a relocation for a branch 13 bits wide. */
29220 fixP->fx_done = 0;
29221 }
29222
29223 if (v8_1_branch_value_check (value, 13, TRUE) == FAIL)
29224 as_bad_where (fixP->fx_file, fixP->fx_line,
29225 BAD_BRANCH_OFF);
29226
29227 if (fixP->fx_done || !seg->use_rela_p)
29228 {
29229 offsetT newval2;
29230 addressT immA, immB, immC;
29231
29232 immA = (value & 0x00001000) >> 12;
29233 immB = (value & 0x00000ffc) >> 2;
29234 immC = (value & 0x00000002) >> 1;
29235
29236 newval = md_chars_to_number (buf, THUMB_SIZE);
29237 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29238 newval |= immA;
29239 newval2 |= (immC << 11) | (immB << 1);
29240 md_number_to_chars (buf, newval, THUMB_SIZE);
29241 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29242 }
29243 break;
29244
29245 case BFD_RELOC_ARM_THUMB_LOOP12:
29246 if (fixP->fx_addsy
29247 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29248 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29249 && ARM_IS_FUNC (fixP->fx_addsy)
29250 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29251 {
29252 /* Force a relocation for a branch 12 bits wide. */
29253 fixP->fx_done = 0;
29254 }
29255
29256 bfd_vma insn = get_thumb32_insn (buf);
29257 /* le lr, <label>, le <label> or letp lr, <label> */
29258 if (((insn & 0xffffffff) == 0xf00fc001)
29259 || ((insn & 0xffffffff) == 0xf02fc001)
29260 || ((insn & 0xffffffff) == 0xf01fc001))
29261 value = -value;
29262
29263 if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
29264 as_bad_where (fixP->fx_file, fixP->fx_line,
29265 BAD_BRANCH_OFF);
29266 if (fixP->fx_done || !seg->use_rela_p)
29267 {
29268 addressT imml, immh;
29269
29270 immh = (value & 0x00000ffc) >> 2;
29271 imml = (value & 0x00000002) >> 1;
29272
29273 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29274 newval |= (imml << 11) | (immh << 1);
29275 md_number_to_chars (buf + THUMB_SIZE, newval, THUMB_SIZE);
29276 }
29277 break;
29278
29279 case BFD_RELOC_ARM_V4BX:
29280 /* This will need to go in the object file. */
29281 fixP->fx_done = 0;
29282 break;
29283
29284 case BFD_RELOC_UNUSED:
29285 default:
29286 as_bad_where (fixP->fx_file, fixP->fx_line,
29287 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
29288 }
29289}
29290
29291/* Translate internal representation of relocation info to BFD target
29292 format. */
29293
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = XNEW (arelent);

  /* BFD relocations reference their symbol through one level of
     indirection.  */
  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      /* For RELA sections fold the PC bias into the addend; for REL
	 sections record the place of the relocation instead.  */
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fixup type to the BFD relocation code that goes
     into the object file.  Data relocations get PC-relative variants
     when the fixup is PC-relative; purely internal fixups that should
     already have been resolved are diagnosed.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These relocation codes pass straight through to the output.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* From EABI v4 on, BLX is expressed as a BRANCH23; the linker
	 decides whether a mode change is actually needed.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
    case BFD_RELOC_ARM_GOTFUNCDESC:
    case BFD_RELOC_ARM_GOTOFFFUNCDESC:
    case BFD_RELOC_ARM_FUNCDESC:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_GD32_FDPIC:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_IE32_FDPIC:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      /* These short-range branches can never be resolved by the linker,
	 so the target must be defined in this assembly.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("%s used for a symbol not defined in the same file"),
		    bfd_get_reloc_code_name (fixp->fx_r_type));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Internal-only fixup types that should never reach reloc
	   generation: report which one we saw.  */
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	　break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8"; break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	 break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	 break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	 break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	 break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	 break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	 break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT"; break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	 break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to _GLOBAL_OFFSET_TABLE_ becomes GOTPC.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
29577
29578/* This fix_new is called by cons via TC_CONS_FIX_NEW. */
29579
29580void
29581cons_fix_new_arm (fragS * frag,
29582 int where,
29583 int size,
29584 expressionS * exp,
29585 bfd_reloc_code_real_type reloc)
29586{
29587 int pcrel = 0;
29588
29589 /* Pick a reloc.
29590 FIXME: @@ Should look at CPU word size. */
29591 switch (size)
29592 {
29593 case 1:
29594 reloc = BFD_RELOC_8;
29595 break;
29596 case 2:
29597 reloc = BFD_RELOC_16;
29598 break;
29599 case 4:
29600 default:
29601 reloc = BFD_RELOC_32;
29602 break;
29603 case 8:
29604 reloc = BFD_RELOC_64;
29605 break;
29606 }
29607
29608#ifdef TE_PE
29609 if (exp->X_op == O_secrel)
29610 {
29611 exp->X_op = O_symbol;
29612 reloc = BFD_RELOC_32_SECREL;
29613 }
29614#endif
29615
29616 fix_new_exp (frag, where, size, exp, pcrel, reloc);
29617}
29618
29619#if defined (OBJ_COFF)
29620void
29621arm_validate_fix (fixS * fixP)
29622{
29623 /* If the destination of the branch is a defined symbol which does not have
29624 the THUMB_FUNC attribute, then we must be calling a function which has
29625 the (interfacearm) attribute. We look for the Thumb entry point to that
29626 function and change the branch to refer to that function instead. */
29627 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
29628 && fixP->fx_addsy != NULL
29629 && S_IS_DEFINED (fixP->fx_addsy)
29630 && ! THUMB_IS_FUNC (fixP->fx_addsy))
29631 {
29632 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
29633 }
29634}
29635#endif
29636
29637
29638int
29639arm_force_relocation (struct fix * fixp)
29640{
29641#if defined (OBJ_COFF) && defined (TE_PE)
29642 if (fixp->fx_r_type == BFD_RELOC_RVA)
29643 return 1;
29644#endif
29645
29646 /* In case we have a call or a branch to a function in ARM ISA mode from
29647 a thumb function or vice-versa force the relocation. These relocations
29648 are cleared off for some cores that might have blx and simple transformations
29649 are possible. */
29650
29651#ifdef OBJ_ELF
29652 switch (fixp->fx_r_type)
29653 {
29654 case BFD_RELOC_ARM_PCREL_JUMP:
29655 case BFD_RELOC_ARM_PCREL_CALL:
29656 case BFD_RELOC_THUMB_PCREL_BLX:
29657 if (THUMB_IS_FUNC (fixp->fx_addsy))
29658 return 1;
29659 break;
29660
29661 case BFD_RELOC_ARM_PCREL_BLX:
29662 case BFD_RELOC_THUMB_PCREL_BRANCH25:
29663 case BFD_RELOC_THUMB_PCREL_BRANCH20:
29664 case BFD_RELOC_THUMB_PCREL_BRANCH23:
29665 if (ARM_IS_FUNC (fixp->fx_addsy))
29666 return 1;
29667 break;
29668
29669 default:
29670 break;
29671 }
29672#endif
29673
29674 /* Resolve these relocations even if the symbol is extern or weak.
29675 Technically this is probably wrong due to symbol preemption.
29676 In practice these relocations do not have enough range to be useful
29677 at dynamic link time, and some code (e.g. in the Linux kernel)
29678 expects these references to be resolved. */
29679 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
29680 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
29681 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
29682 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
29683 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
29684 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
29685 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
29686 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
29687 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
29688 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
29689 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
29690 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
29691 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
29692 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
29693 return 0;
29694
29695 /* Always leave these relocations for the linker. */
29696 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
29697 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
29698 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
29699 return 1;
29700
29701 /* Always generate relocations against function symbols. */
29702 if (fixp->fx_r_type == BFD_RELOC_32
29703 && fixp->fx_addsy
29704 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
29705 return 1;
29706
29707 return generic_force_reloc (fixp);
29708}
29709
29710#if defined (OBJ_ELF) || defined (OBJ_COFF)
29711/* Relocations against function names must be left unadjusted,
29712 so that the linker can use this information to generate interworking
29713 stubs. The MIPS version of this function
29714 also prevents relocations that are mips-16 specific, but I do not
29715 know why it does this.
29716
29717 FIXME:
29718 There is one other problem that ought to be addressed here, but
29719 which currently is not: Taking the address of a label (rather
29720 than a function) and then later jumping to that address. Such
29721 addresses also ought to have their bottom bit set (assuming that
29722 they reside in Thumb code), but at the moment they will not. */
29723
29724bfd_boolean
29725arm_fix_adjustable (fixS * fixP)
29726{
29727 if (fixP->fx_addsy == NULL)
29728 return 1;
29729
29730 /* Preserve relocations against symbols with function type. */
29731 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
29732 return FALSE;
29733
29734 if (THUMB_IS_FUNC (fixP->fx_addsy)
29735 && fixP->fx_subsy == NULL)
29736 return FALSE;
29737
29738 /* We need the symbol name for the VTABLE entries. */
29739 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
29740 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
29741 return FALSE;
29742
29743 /* Don't allow symbols to be discarded on GOT related relocs. */
29744 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
29745 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
29746 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
29747 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
29748 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32_FDPIC
29749 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
29750 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
29751 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32_FDPIC
29752 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
29753 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32_FDPIC
29754 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
29755 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
29756 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
29757 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
29758 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
29759 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
29760 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
29761 return FALSE;
29762
29763 /* Similarly for group relocations. */
29764 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
29765 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
29766 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
29767 return FALSE;
29768
29769 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
29770 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
29771 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
29772 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
29773 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
29774 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
29775 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
29776 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
29777 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
29778 return FALSE;
29779
29780 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
29781 offsets, so keep these symbols. */
29782 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
29783 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
29784 return FALSE;
29785
29786 return TRUE;
29787}
29788#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
29789
29790#ifdef OBJ_ELF
29791const char *
29792elf32_arm_target_format (void)
29793{
29794#ifdef TE_SYMBIAN
29795 return (target_big_endian
29796 ? "elf32-bigarm-symbian"
29797 : "elf32-littlearm-symbian");
29798#elif defined (TE_VXWORKS)
29799 return (target_big_endian
29800 ? "elf32-bigarm-vxworks"
29801 : "elf32-littlearm-vxworks");
29802#elif defined (TE_NACL)
29803 return (target_big_endian
29804 ? "elf32-bigarm-nacl"
29805 : "elf32-littlearm-nacl");
29806#else
29807 if (arm_fdpic)
29808 {
29809 if (target_big_endian)
29810 return "elf32-bigarm-fdpic";
29811 else
29812 return "elf32-littlearm-fdpic";
29813 }
29814 else
29815 {
29816 if (target_big_endian)
29817 return "elf32-bigarm";
29818 else
29819 return "elf32-littlearm";
29820 }
29821#endif
29822}
29823
/* Target hook for symbol frobbing; simply defers to the generic ELF
   handling.  */

void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  elf_frob_symbol (symp, puntp);
}
29830#endif
29831
29832/* MD interface: Finalization. */
29833
29834void
29835arm_cleanup (void)
29836{
29837 literal_pool * pool;
29838
29839 /* Ensure that all the predication blocks are properly closed. */
29840 check_pred_blocks_finished ();
29841
29842 for (pool = list_of_pools; pool; pool = pool->next)
29843 {
29844 /* Put it at the end of the relevant section. */
29845 subseg_set (pool->section, pool->sub_section);
29846#ifdef OBJ_ELF
29847 arm_elf_change_section ();
29848#endif
29849 s_ltorg (0);
29850 }
29851}
29852
29853#ifdef OBJ_ELF
29854/* Remove any excess mapping symbols generated for alignment frags in
29855 SEC. We may have created a mapping symbol before a zero byte
29856 alignment; remove it if there's a mapping symbol after the
29857 alignment. */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag; a mapping symbol that sits exactly at the end of
     its frag may be redundant with one at the start of a later frag.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward through empty frags to decide the fate of SYM.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
29918#endif
29919
29920/* Adjust the symbol table. This marks Thumb symbols as distinct from
29921 ARM ones. */
29922
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* COFF: translate storage classes of Thumb symbols to their
     Thumb-specific equivalents, and flag interworking symbols.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  /* ELF: tag Thumb symbols so the linker knows their instruction set.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a, $t, $d, ...) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
30001
30002/* MD interface: Initialization. */
30003
30004static void
30005set_constant_flonums (void)
30006{
30007 int i;
30008
30009 for (i = 0; i < NUM_FLOAT_VALS; i++)
30010 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
30011 abort ();
30012}
30013
30014/* Auto-select Thumb mode if it's the only available instruction set for the
30015 given architecture. */
30016
static void
autoselect_thumb_from_cpu_variant (void)
{
  /* arm_ext_v1 marks the base ARM (32-bit) instruction set; a target
     without it (e.g. M-profile) can only execute Thumb, so switch.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
30023
/* Main target initialization hook: build the opcode/operand hash
   tables, resolve the CPU/FPU selection from the command line, set the
   object file flags and record the machine type with BFD.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the lookup tables used throughout parsing.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_vcond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (vconds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_vcond_hsh, vconds[i].template_name, (void *) (vconds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      selected_arch = *legacy_cpu;
    }
  else if (mcpu_cpu_opt)
    {
      selected_arch = *mcpu_cpu_opt;
      selected_ext = *mcpu_ext_opt;
    }
  else if (march_cpu_opt)
    {
      selected_arch = *march_cpu_opt;
      selected_ext = *march_ext_opt;
    }
  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      selected_fpu = *legacy_fpu;
    }
  else if (mfpu_opt)
    selected_fpu = *mfpu_opt;
  else
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	selected_fpu = *mcpu_fpu_opt;
      else if (march_fpu_opt)
	selected_fpu = *march_fpu_opt;
#else
      selected_fpu = fpu_default;
#endif
    }

  if (ARM_FEATURE_ZERO (selected_fpu))
    {
      if (!no_cpu_selected ())
	selected_fpu = fpu_default;
      else
	selected_fpu = fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (ARM_FEATURE_ZERO (selected_arch))
    {
      selected_arch = cpu_default;
      selected_cpu = selected_arch;
    }
  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#else
  /* Autodetection of feature mode: allow all features in cpu_variant but leave
     selected_cpu unset.  It will be set in aeabi_set_public_attributes ()
     after all instructions have been processed and we can decide what CPU
     should be selected.  */
  if (ARM_FEATURE_ZERO (selected_arch))
    ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
  else
    ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#endif

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).	*/
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	    flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags (sec, SEC_READONLY | SEC_DEBUGGING);
	    bfd_set_section_size (sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Pick the most specific machine
     number the selected feature set supports.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
30262
30263/* Command line processing. */
30264
30265/* md_parse_option
30266 Invocation line includes a switch not recognized by the base assembler.
30267 See if it's a processor-specific option.
30268
30269 This routine is somewhat complicated by the need for backwards
30270 compatibility (since older releases of gcc can't be changed).
30271 The new options try to make the interface as compatible as
30272 possible with GCC.
30273
30274 New options (supported) are:
30275
30276 -mcpu=<cpu name> Assemble for selected processor
30277 -march=<architecture name> Assemble for selected architecture
30278 -mfpu=<fpu architecture> Assemble for selected FPU.
30279 -EB/-mbig-endian Big-endian
30280 -EL/-mlittle-endian Little-endian
30281 -k Generate PIC code
30282 -mthumb Start in Thumb mode
30283 -mthumb-interwork Code supports ARM/Thumb interworking
30284
30285 -m[no-]warn-deprecated Warn about deprecated features
30286 -m[no-]warn-syms Warn when symbols match instructions
30287
30288 For now we will also provide support for:
30289
30290 -mapcs-32 32-bit Program counter
30291 -mapcs-26 26-bit Program counter
      -mapcs-float	     Floats passed in FP registers
30293 -mapcs-reentrant Reentrant code
30294 -matpcs
      (sometimes these will probably be replaced with -mapcs=<list of options>
30296 and -matpcs=<list of options>)
30297
      The remaining options are only supported for backwards compatibility.
30299 Cpu variants, the arm part is optional:
30300 -m[arm]1 Currently not supported.
30301 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
30302 -m[arm]3 Arm 3 processor
30303 -m[arm]6[xx], Arm 6 processors
30304 -m[arm]7[xx][t][[d]m] Arm 7 processors
30305 -m[arm]8[10] Arm 8 processors
30306 -m[arm]9[20][tdmi] Arm 9 processors
30307 -mstrongarm[110[0]] StrongARM processors
30308 -mxscale XScale processors
30309 -m[arm]v[2345[t[e]]] Arm architectures
30310 -mall All (except the ARM1)
30311 FP variants:
30312 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
30313 -mfpe-old (No float load/store multiples)
30314 -mvfpxd VFP Single precision
30315 -mvfp All VFP
30316 -mno-fpu Disable all floating point instructions
30317
30318 The following CPU names are recognized:
30319 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
30320 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
30321 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
30322 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
30323 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
30324 arm10t arm10e, arm1020t, arm1020e, arm10200e,
30325 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
30326
30327 */
30328
/* Short options recognised by gas for ARM: -m<arg> (cpu/arch/fpu and
   friends, argument required) and -k (PIC).  */
const char * md_shortopts = "m:k";

/* getopt_long return values for the long options in md_longopts below.
   OPTION_EB / OPTION_EL are only defined when the corresponding endianness
   is actually available for this target, so the table entries can be
   conditionally compiled on the macro's existence.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
#define OPTION_FDPIC (OPTION_MD_BASE + 3)
30343
/* Long options: endianness selection (-EB/-EL where available), the ARMv4
   BX interworking fix-up, and FDPIC code generation (ELF only).  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
#ifdef OBJ_ELF
  {"fdpic", no_argument, NULL, OPTION_FDPIC},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
30360
/* Describes a simple command-line option that, when matched, stores a
   fixed value into an integer variable (see arm_opts below).  */
struct arm_option_table
{
  const char * option;		/* Option name to match.  */
  const char * help;		/* Help information.  */
  int * var;			/* Variable to change.	*/
  int value;			/* What to change it to.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
30369
30370struct arm_option_table arm_opts[] =
30371{
30372 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
30373 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
30374 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
30375 &support_interwork, 1, NULL},
30376 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
30377 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
30378 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
30379 1, NULL},
30380 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
30381 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
30382 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
30383 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
30384 NULL},
30385
30386 /* These are recognized by the assembler, but have no affect on code. */
30387 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
30388 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
30389
30390 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
30391 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
30392 &warn_on_deprecated, 0, NULL},
30393 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
30394 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
30395 {NULL, NULL, NULL, 0, NULL}
30396};
30397
/* Describes a deprecated option that selects an ARM or FPU feature set:
   matching the option stores &VALUE into *VAR and may print a
   deprecation message suggesting the modern spelling.  */
struct arm_legacy_option_table
{
  const char * option;			/* Option name to match.  */
  const arm_feature_set ** var;		/* Variable to change.	*/
  const arm_feature_set	value;		/* What to change it to.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
30405
/* Deprecated -m<cpu>, -m<arch> and -m<fpu> spellings, kept only for
   backwards compatibility.  Each entry maps to a feature set and carries
   a message pointing the user at the modern -mcpu=/-march=/-mfpu=
   equivalent.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
30518
/* Describes one -mcpu= option: the base architecture feature set of the
   CPU, any CPU-specific extension bits implied on top of it, and the FPU
   assumed when the user does not pass -mfpu=.  */
struct arm_cpu_option_table
{
  const char *	name;		/* Option name to match (lower case).  */
  size_t	name_len;	/* Length of NAME, excluding the NUL.  */
  const arm_feature_set	value;	/* Base architecture features.	*/
  const arm_feature_set	ext;	/* CPU-specific extension features.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *	canonical_name;
};
30532
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* N = option name, CN = canonical name (NULL => N upper-cased),
   V = base features, E = implied extensions, DF = default FPU.  */
#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }

static const struct arm_cpu_option_table arm_cpus[] =
{
  /* Pre-v5 cores: default to the FPA co-processor.  */
  ARM_CPU_OPT ("all",		  NULL,		       ARM_ANY,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm1",		  NULL,		       ARM_ARCH_V1,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm2",		  NULL,		       ARM_ARCH_V2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm250",	  NULL,		       ARM_ARCH_V2S,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm3",		  NULL,		       ARM_ARCH_V2S,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm6",		  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm60",		  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm600",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm610",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm620",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7",		  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7m",		  NULL,		       ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7d",		  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dm",	  NULL,		       ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7di",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dmi",	  NULL,		       ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm70",		  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700i",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710t",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720t",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm740t",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710c",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7100",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500fe",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7t",		  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi-s",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm8",		  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm810",	  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm",	  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1",	  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm110",	  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1100",	  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1110",	  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9",		  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920",	  "ARM920T",	       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920t",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm922t",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm940t",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9tdmi",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa526",		  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa626",		  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),

  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0",	  NULL,		       ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm9e",		  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej",	  "ARM926EJ-S",	       ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ejs",	  "ARM926EJ-S",	       ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej-s",	  NULL,		       ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-r0",	  NULL,		       ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e",	  "ARM946E-S",	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-s",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-r0",	  NULL,		       ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e",	  "ARM966E-S",	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-s",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm968e-s",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm10t",	  NULL,		       ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10tdmi",	  NULL,		       ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10e",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020",	  "ARM1020E",	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020t",	  NULL,		       ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm1020e",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1022e",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ejs",	  "ARM1026EJ-S",       ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ej-s",	  NULL,		       ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa606te",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa616te",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa626te",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fmp626",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa726te",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136js",	  "ARM1136J-S",	       ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1136j-s",	  NULL,		       ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1136jfs",	  "ARM1136JF-S",       ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136jf-s",	  NULL,		       ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcore",	  "MPCore",	       ARM_ARCH_V6K,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcorenovfp",	  "MPCore",	       ARM_ARCH_V6K,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1156t2-s",	  NULL,		       ARM_ARCH_V6T2,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1156t2f-s",	  NULL,		       ARM_ARCH_V6T2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1176jz-s",	  NULL,		       ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1176jzf-s",	  NULL,		       ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  /* Cortex-A application profile cores.  */
  ARM_CPU_OPT ("cortex-a5",	  "Cortex-A5",	       ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-a7",	  "Cortex-A7",	       ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a8",	  "Cortex-A8",	       ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a9",	  "Cortex-A9",	       ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a12",	  "Cortex-A12",	       ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a15",	  "Cortex-A15",	       ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a17",	  "Cortex-A17",	       ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a32",	  "Cortex-A32",	       ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a35",	  "Cortex-A35",	       ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a53",	  "Cortex-A53",	       ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a55",	  "Cortex-A55",	       ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a57",	  "Cortex-A57",	       ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a72",	  "Cortex-A72",	       ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a73",	  "Cortex-A73",	       ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a75",	  "Cortex-A75",	       ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a76",	  "Cortex-A76",	       ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a76ae",	  "Cortex-A76AE",      ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a77",	  "Cortex-A77",	       ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("ares",		  "Ares",	       ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  /* Cortex-R real-time profile cores.	*/
  ARM_CPU_OPT ("cortex-r4",	  "Cortex-R4",	       ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r4f",	  "Cortex-R4F",	       ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r5",	  "Cortex-R5",	       ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r7",	  "Cortex-R7",	       ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r8",	  "Cortex-R8",	       ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r52",	  "Cortex-R52",	       ARM_ARCH_V8R,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_NEON_VFP_ARMV8),
  /* Cortex-M microcontroller profile cores.  */
  ARM_CPU_OPT ("cortex-m35p",	  "Cortex-M35P",       ARM_ARCH_V8M_MAIN,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m33",	  "Cortex-M33",	       ARM_ARCH_V8M_MAIN,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m23",	  "Cortex-M23",	       ARM_ARCH_V8M_BASE,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m7",	  "Cortex-M7",	       ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m4",	  "Cortex-M4",	       ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m3",	  "Cortex-M3",	       ARM_ARCH_V7M,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m1",	  "Cortex-M1",	       ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0",	  "Cortex-M0",	       ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0plus",	  "Cortex-M0+",	       ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("exynos-m1",	  "Samsung Exynos M1", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("neoverse-n1",	  "Neoverse N1",       ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale",	  NULL,		       ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt",	  NULL,		       ARM_ARCH_IWMMXT,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("iwmmxt2",	  NULL,		       ARM_ARCH_IWMMXT2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("i80200",	  NULL,		       ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* Maverick.	*/
  ARM_CPU_OPT ("ep9312",	  "ARM920T",
	       ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
	       ARM_ARCH_NONE, FPU_ARCH_MAVERICK),

  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4",	  NULL,		       ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("marvell-whitney", NULL,		       ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_NEON_VFP_V4),

  /* APM X-Gene family.	 */
  ARM_CPU_OPT ("xgene1",	  "APM X-Gene 1",      ARM_ARCH_V8A,
	       ARM_ARCH_NONE,
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("xgene2",	  "APM X-Gene 2",      ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),

  /* Sentinel: NULL name terminates the table.	*/
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
30936
/* One +<ext>/+no<ext> entry in an architecture's extension table:
   MERGE holds the features enabled by "+name", CLEAR the features
   removed by "+noname".  */
struct arm_ext_table
{
  const char *		  name;		/* Extension name to match.  */
  size_t		  name_len;	/* Length of NAME, excluding the NUL.  */
  const arm_feature_set	  merge;	/* Features enabled by +NAME.  */
  const arm_feature_set	  clear;	/* Features removed by +noNAME.	 */
};
30944
/* Describes one -march= option: the architecture's feature set, its
   default FPU, and an optional table of +extensions it accepts.  */
struct arm_arch_option_table
{
  const char *			name;		/* Option name to match.  */
  size_t			name_len;	/* Length of NAME, excluding the NUL.  */
  const arm_feature_set		value;		/* Architecture features.  */
  const arm_feature_set		default_fpu;	/* FPU assumed without -mfpu=.	*/
  const struct arm_ext_table *	ext_table;	/* +extension table, or NULL.  */
};
30953
/* Used to add support for +E and +noE extension.  */
#define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
/* Used to add support for a +E extension.  */
#define ARM_ADD(E, M) { E, sizeof(E) - 1, M, ARM_ARCH_NONE }
/* Used to add support for a +noE extension.  */
#define ARM_REMOVE(E, C) { E, sizeof(E) -1, ARM_ARCH_NONE, C }

/* Mask matching every FP/SIMD feature bit except the pure-endianness
   flag; used as the CLEAR set so that "+nofp" removes all FP variants.	 */
#define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
			    ~0 & ~FPU_ENDIAN_PURE)
30963
/* +extensions accepted by -march=armv5te and related V5TE-based archs.	 */
static const struct arm_ext_table armv5te_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V2, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
30969
/* +extensions accepted by -march=armv7.  */
static const struct arm_ext_table armv7_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
30975
/* +extensions accepted by -march=armv7ve.  */
static const struct arm_ext_table armv7ve_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16), /* Alias for +fp.	*/
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.	 */
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
30998
/* +extensions accepted by -march=armv7-a.  */
static const struct arm_ext_table armv7a_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.	*/
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.	 */
  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),

  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP)),
  ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31023
/* +extensions accepted by -march=armv7-r.  */
static const struct arm_ext_table armv7r_ext_table[] =
{
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD),
  ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD), /* Alias for +fp.sp.	*/
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.	*/
  ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31036
/* +extensions accepted by -march=armv7e-m.  */
static const struct arm_ext_table armv7em_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16, ALL_FP),
  /* Alias for +fp, used to be known as fpv4-sp-d16.  */
  ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16),
  ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31047
/* +extensions accepted by -march=armv8-a.  */
static const struct arm_ext_table armv8a_ext_table[] =
{
  ARM_ADD ("crc", ARCH_CRC_ARMV8),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.	 */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31062
31063
/* +extensions accepted by -march=armv8.1-a.  */
static const struct arm_ext_table armv81a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.	 */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31077
/* +extensions accepted by -march=armv8.2-a (and armv8.3-a).  */
static const struct arm_ext_table armv82a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16),
  ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.	 */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31094
/* +extensions accepted by -march=armv8.4-a.  */
static const struct arm_ext_table armv84a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.	 */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31109
/* +extensions accepted by -march=armv8.5-a.  No +sb/+predres entries
   here: those features are mandatory from Armv8.5-A onwards.  */
static const struct arm_ext_table armv85a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.	 */
  ARM_REMOVE ("fp", ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31122
/* Context-sensitive extension table for -march=armv8.6-a.  Currently empty
   (terminator only): no armv8.6-a-specific "+ext" options are wired up yet.  */
static const struct arm_ext_table armv86a_ext_table[] =
{
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31127
/* Context-sensitive extension table for -march=armv8-m.main.  */
static const struct arm_ext_table armv8m_main_ext_table[] =
{
  /* "+dsp" toggles the DSP instruction set; the same bits are removed by
     "+nodsp".  */
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
		  ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
  /* "+fp" enables single-precision FP; "+nofp" removes all FP.  */
  ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16, ALL_FP),
  /* "+fp.dp" additionally enables double precision.  */
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31136
/* Context-sensitive extension table for -march=armv8.1-m.main.  The FP and
   MVE entries carry ARM_EXT2_FP16_INST in the core-high word as well as
   coprocessor FPU bits.  */
static const struct arm_ext_table armv8_1m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
		  ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
  /* "+fp": single-precision FP with FP16 and FMA; "+nofp" removes all FP.  */
  ARM_EXT ("fp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA),
	   ALL_FP),
  ARM_ADD ("fp.dp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  /* "+mve": integer MVE only; "+nomve" also removes the FP variant.  */
  ARM_EXT ("mve", ARM_FEATURE_COPROC (FPU_MVE),
	   ARM_FEATURE_COPROC (FPU_MVE | FPU_MVE_FP)),
  /* "+mve.fp": MVE with its floating-point extension plus scalar FP.  */
  ARM_ADD ("mve.fp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_MVE | FPU_MVE_FP | FPU_VFP_V5_SP_D16 |
			FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31156
/* Context-sensitive extension table for -march=armv8-r.  */
static const struct arm_ext_table armv8r_ext_table[] =
{
  ARM_ADD ("crc", ARCH_CRC_ARMV8),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  /* "+nofp" removes all FP; "+fp.sp" re-enables single precision only.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31167
31168/* This list should, at a minimum, contain all the architecture names
31169 recognized by GCC. */
31170#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
31171#define ARM_ARCH_OPT2(N, V, DF, ext) \
31172 { N, sizeof (N) - 1, V, DF, ext##_ext_table }
31173
31174static const struct arm_arch_option_table arm_archs[] =
31175{
31176 ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
31177 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
31178 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
31179 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
31180 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
31181 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
31182 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
31183 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
31184 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
31185 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
31186 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
31187 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
31188 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
31189 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
31190 ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP, armv5te),
31191 ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP, armv5te),
31192 ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP, armv5te),
31193 ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
31194 ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
31195 ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP, armv5te),
31196 ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP, armv5te),
31197 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
31198 kept to preserve existing behaviour. */
31199 ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
31200 ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
31201 ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP, armv5te),
31202 ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP, armv5te),
31203 ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP, armv5te),
31204 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
31205 kept to preserve existing behaviour. */
31206 ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
31207 ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
31208 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
31209 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
31210 ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP, armv7),
31211 /* The official spelling of the ARMv7 profile variants is the dashed form.
31212 Accept the non-dashed form for compatibility with old toolchains. */
31213 ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
31214 ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP, armv7ve),
31215 ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
31216 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
31217 ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
31218 ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
31219 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
31220 ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP, armv7em),
31221 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
31222 ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP,
31223 armv8m_main),
31224 ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN, FPU_ARCH_VFP,
31225 armv8_1m_main),
31226 ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP, armv8a),
31227 ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP, armv81a),
31228 ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP, armv82a),
31229 ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A, FPU_ARCH_VFP, armv82a),
31230 ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R, FPU_ARCH_VFP, armv8r),
31231 ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A, FPU_ARCH_VFP, armv84a),
31232 ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A, FPU_ARCH_VFP, armv85a),
31233 ARM_ARCH_OPT2 ("armv8.6-a", ARM_ARCH_V8_6A, FPU_ARCH_VFP, armv86a),
31234 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
31235 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
31236 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2, FPU_ARCH_VFP),
31237 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
31238};
31239#undef ARM_ARCH_OPT
31240
/* ISA extensions in the co-processor and main instruction set space.  */

struct arm_option_extension_value_table
{
  /* Extension name as given after '+' on the command line.  */
  const char * name;
  /* Length of NAME, precomputed so lookups can use strncmp.  */
  size_t name_len;
  /* Feature bits merged in when the extension is added ("+ext").  */
  const arm_feature_set merge_value;
  /* Feature bits cleared when the extension is removed ("+noext").  */
  const arm_feature_set clear_value;
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set allowed_archs[2];
};
31254
31255/* The following table must be in alphabetical order with a NULL last entry. */
31256
31257#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
31258#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
31259
31260/* DEPRECATED: Refrain from using this table to add any new extensions, instead
31261 use the context sensitive approach using arm_ext_table's. */
31262static const struct arm_option_extension_value_table arm_extensions[] =
31263{
31264 ARM_EXT_OPT ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16),
31265 ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16),
31266 ARM_ARCH_V8_2A),
31267 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
31268 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
31269 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
31270 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
31271 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
31272 ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
31273 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
31274 ARM_ARCH_V8_2A),
31275 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
31276 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
31277 ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
31278 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
31279 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
31280 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
31281 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
31282 ARM_ARCH_V8_2A),
31283 ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
31284 | ARM_EXT2_FP16_FML),
31285 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
31286 | ARM_EXT2_FP16_FML),
31287 ARM_ARCH_V8_2A),
31288 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
31289 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
31290 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
31291 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
31292 /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
31293 Thumb divide instruction. Due to this having the same name as the
31294 previous entry, this will be ignored when doing command-line parsing and
31295 only considered by build attribute selection code. */
31296 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
31297 ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
31298 ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
31299 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
31300 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
31301 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
31302 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
31303 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
31304 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
31305 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
31306 ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
31307 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
31308 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
31309 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
31310 ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
31311 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
31312 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
31313 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
31314 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
31315 ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
31316 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
31317 ARM_ARCH_V8A),
31318 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
31319 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
31320 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
31321 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1,
31322 ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
31323 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
31324 ARM_EXT_OPT ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
31325 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
31326 ARM_ARCH_V8A),
31327 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
31328 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
31329 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
31330 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
31331 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
31332 ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
31333 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
31334 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
31335 | ARM_EXT_DIV),
31336 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
31337 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
31338 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
31339 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
31340 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
31341};
31342#undef ARM_EXT_OPT
31343
/* ISA floating-point and Advanced SIMD extensions.  */
struct arm_option_fpu_value_table
{
  /* FPU name accepted by -mfpu= (matched with streq, i.e. exactly).  */
  const char * name;
  /* Feature set selected by this FPU.  */
  const arm_feature_set value;
};
31350
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Entries marked "Undocumented" are legacy spellings
   kept for compatibility; NULL entry terminates the table.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3",	FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}
};
31401
/* Generic name -> integer-value mapping used for simple option tables
   (float ABI, EABI version).  */
struct arm_option_value_table
{
  const char *name;	/* Option value as written on the command line.  */
  long value;		/* Corresponding internal constant.  */
};
31407
/* Accepted values for -mfloat-abi=.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
31415
#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif
31426
/* Description of a multi-character option (e.g. "mcpu=") whose value is
   parsed by a dedicated callback; see arm_long_opts and md_parse_option.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.  */
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
31434
/* Parse the "+ext[+ext...]" suffix STR of a -mcpu=/-march= option.
   OPT_SET is the feature set of the selected base CPU/architecture and is
   used only to check that an extension applies to it.  Feature bits of each
   extension are merged into, or cleared from, *EXT_SET.  EXT_TABLE, if
   non-NULL, is the architecture's context-sensitive extension table and is
   consulted before the legacy global arm_extensions table.  Returns TRUE on
   success, FALSE (after issuing a diagnostic) on any error.  */
static bfd_boolean
arm_parse_extension (const char *str, const arm_feature_set *opt_set,
		     arm_feature_set *ext_set,
		     const struct arm_ext_table *ext_table)
{
  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  const arm_feature_set arm_any = ARM_ANY;
  int adding_value = -1;

  /* Each iteration consumes one "+ext" component of STR.  */
  while (str != NULL && *str != 0)
    {
      const char *ext;
      size_t len;

      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      /* A leading "no" means the extension is being removed.  */
      if (len >= 2 && strncmp (str, "no", 2) == 0)
	{
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      /* Restart the alphabetical scan for the removal phase.  */
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return FALSE;
	    }
	}

      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* Prefer the architecture's own context-sensitive table, if any.  */
      if (ext_table != NULL)
	{
	  const struct arm_ext_table * ext_opt = ext_table;
	  bfd_boolean found = FALSE;
	  for (; ext_opt->name != NULL; ext_opt++)
	    if (ext_opt->name_len == len
		&& strncmp (ext_opt->name, str, len) == 0)
	      {
		if (adding_value)
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->merge))
		      /* TODO: Option not supported.  When we remove the
			 legacy table this case should error out.  */
		      continue;

		    ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, ext_opt->merge);
		  }
		else
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->clear))
		      /* TODO: Option not supported.  When we remove the
			 legacy table this case should error out.  */
		      continue;
		    ARM_CLEAR_FEATURE (*ext_set, *ext_set, ext_opt->clear);
		  }
		found = TRUE;
		break;
	      }
	  if (found)
	    {
	      /* Handled via the context table; move to the next "+ext".  */
	      str = ext;
	      continue;
	    }
	}

      /* Scan over the options table trying to find an exact match.  */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    int i, nb_allowed_archs =
	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
	    /* Check we can apply the extension to this architecture.  */
	    for (i = 0; i < nb_allowed_archs; i++)
	      {
		/* Empty entry.  */
		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
		  continue;
		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
		  break;
	      }
	    if (i == nb_allowed_archs)
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return FALSE;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);

	    /* Allowing Thumb division instructions for ARMv7 in autodetection
	       rely on this break so that duplicate extensions (extensions
	       with the same name as a previous extension in the list) are not
	       considered for command-line parsing.  */
	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return FALSE;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      str = ext;
    };

  return TRUE;
}
31602
31603static bfd_boolean
31604arm_parse_fp16_opt (const char *str)
31605{
31606 if (strcasecmp (str, "ieee") == 0)
31607 fp16_format = ARM_FP16_FORMAT_IEEE;
31608 else if (strcasecmp (str, "alternative") == 0)
31609 fp16_format = ARM_FP16_FORMAT_ALTERNATIVE;
31610 else
31611 {
31612 as_bad (_("unrecognised float16 format \"%s\""), str);
31613 return FALSE;
31614 }
31615
31616 return TRUE;
31617}
31618
31619static bfd_boolean
31620arm_parse_cpu (const char *str)
31621{
31622 const struct arm_cpu_option_table *opt;
31623 const char *ext = strchr (str, '+');
31624 size_t len;
31625
31626 if (ext != NULL)
31627 len = ext - str;
31628 else
31629 len = strlen (str);
31630
31631 if (len == 0)
31632 {
31633 as_bad (_("missing cpu name `%s'"), str);
31634 return FALSE;
31635 }
31636
31637 for (opt = arm_cpus; opt->name != NULL; opt++)
31638 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
31639 {
31640 mcpu_cpu_opt = &opt->value;
31641 if (mcpu_ext_opt == NULL)
31642 mcpu_ext_opt = XNEW (arm_feature_set);
31643 *mcpu_ext_opt = opt->ext;
31644 mcpu_fpu_opt = &opt->default_fpu;
31645 if (opt->canonical_name)
31646 {
31647 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
31648 strcpy (selected_cpu_name, opt->canonical_name);
31649 }
31650 else
31651 {
31652 size_t i;
31653
31654 if (len >= sizeof selected_cpu_name)
31655 len = (sizeof selected_cpu_name) - 1;
31656
31657 for (i = 0; i < len; i++)
31658 selected_cpu_name[i] = TOUPPER (opt->name[i]);
31659 selected_cpu_name[i] = 0;
31660 }
31661
31662 if (ext != NULL)
31663 return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt, NULL);
31664
31665 return TRUE;
31666 }
31667
31668 as_bad (_("unknown cpu `%s'"), str);
31669 return FALSE;
31670}
31671
31672static bfd_boolean
31673arm_parse_arch (const char *str)
31674{
31675 const struct arm_arch_option_table *opt;
31676 const char *ext = strchr (str, '+');
31677 size_t len;
31678
31679 if (ext != NULL)
31680 len = ext - str;
31681 else
31682 len = strlen (str);
31683
31684 if (len == 0)
31685 {
31686 as_bad (_("missing architecture name `%s'"), str);
31687 return FALSE;
31688 }
31689
31690 for (opt = arm_archs; opt->name != NULL; opt++)
31691 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
31692 {
31693 march_cpu_opt = &opt->value;
31694 if (march_ext_opt == NULL)
31695 march_ext_opt = XNEW (arm_feature_set);
31696 *march_ext_opt = arm_arch_none;
31697 march_fpu_opt = &opt->default_fpu;
31698 selected_ctx_ext_table = opt->ext_table;
31699 strcpy (selected_cpu_name, opt->name);
31700
31701 if (ext != NULL)
31702 return arm_parse_extension (ext, march_cpu_opt, march_ext_opt,
31703 opt->ext_table);
31704
31705 return TRUE;
31706 }
31707
31708 as_bad (_("unknown architecture `%s'\n"), str);
31709 return FALSE;
31710}
31711
31712static bfd_boolean
31713arm_parse_fpu (const char * str)
31714{
31715 const struct arm_option_fpu_value_table * opt;
31716
31717 for (opt = arm_fpus; opt->name != NULL; opt++)
31718 if (streq (opt->name, str))
31719 {
31720 mfpu_opt = &opt->value;
31721 return TRUE;
31722 }
31723
31724 as_bad (_("unknown floating point format `%s'\n"), str);
31725 return FALSE;
31726}
31727
31728static bfd_boolean
31729arm_parse_float_abi (const char * str)
31730{
31731 const struct arm_option_value_table * opt;
31732
31733 for (opt = arm_float_abis; opt->name != NULL; opt++)
31734 if (streq (opt->name, str))
31735 {
31736 mfloat_abi_opt = opt->value;
31737 return TRUE;
31738 }
31739
31740 as_bad (_("unknown floating point abi `%s'\n"), str);
31741 return FALSE;
31742}
31743
#ifdef OBJ_ELF
/* Handle -meabi=<ver>.  Exact-matches STR against arm_eabis and records the
   resulting ELF flags in meabi_flags.  Returns TRUE on success.  */
static bfd_boolean
arm_parse_eabi (const char * str)
{
  const struct arm_option_value_table *eabi;

  for (eabi = arm_eabis; eabi->name != NULL; eabi++)
    {
      if (!streq (eabi->name, str))
	continue;

      meabi_flags = eabi->value;
      return TRUE;
    }

  as_bad (_("unknown EABI `%s'\n"), str);
  return FALSE;
}
#endif
31760
31761static bfd_boolean
31762arm_parse_it_mode (const char * str)
31763{
31764 bfd_boolean ret = TRUE;
31765
31766 if (streq ("arm", str))
31767 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
31768 else if (streq ("thumb", str))
31769 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
31770 else if (streq ("always", str))
31771 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
31772 else if (streq ("never", str))
31773 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
31774 else
31775 {
31776 as_bad (_("unknown implicit IT mode `%s', should be "\
31777 "arm, thumb, always, or never."), str);
31778 ret = FALSE;
31779 }
31780
31781 return ret;
31782}
31783
31784static bfd_boolean
31785arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
31786{
31787 codecomposer_syntax = TRUE;
31788 arm_comment_chars[0] = ';';
31789 arm_line_separator_chars[0] = 0;
31790 return TRUE;
31791}
31792
/* Table of multi-character options and their sub-option parsers.  Matched
   by prefix in md_parse_option (first char is the getopt letter, the rest
   is compared with strncmp), so "name=" entries consume the value text
   following the '='.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {"mfp16-format=",
   N_("[ieee|alternative]\n\
                          set the encoding for half precision floating point "
			  "numbers to IEEE\n\
                          or Arm alternative format."),
   arm_parse_fp16_opt, NULL },
  {NULL, NULL, 0, NULL}
};
31819
/* gas target hook: handle one ARM-specific command-line option.  C is the
   option character and ARG its argument (NULL when absent).  Returns 1 when
   the option was consumed, 0 when it was not recognized.  */
int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

#ifdef OBJ_ELF
    case OPTION_FDPIC:
      arm_fdpic = TRUE;
      break;
#endif /* OBJ_ELF */

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Try the simple-flag table, the legacy table, then the long-option
	 table, in that order.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser with the text following the
		 matched "name=" prefix (the option letter C is not part of
		 ARG, hence the "- 1").  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
31916
31917void
31918md_show_usage (FILE * fp)
31919{
31920 struct arm_option_table *opt;
31921 struct arm_long_option_table *lopt;
31922
31923 fprintf (fp, _(" ARM-specific assembler options:\n"));
31924
31925 for (opt = arm_opts; opt->option != NULL; opt++)
31926 if (opt->help != NULL)
31927 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
31928
31929 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
31930 if (lopt->help != NULL)
31931 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
31932
31933#ifdef OPTION_EB
31934 fprintf (fp, _("\
31935 -EB assemble code for a big-endian cpu\n"));
31936#endif
31937
31938#ifdef OPTION_EL
31939 fprintf (fp, _("\
31940 -EL assemble code for a little-endian cpu\n"));
31941#endif
31942
31943 fprintf (fp, _("\
31944 --fix-v4bx Allow BX in ARMv4 code\n"));
31945
31946#ifdef OBJ_ELF
31947 fprintf (fp, _("\
31948 --fdpic generate an FDPIC object file\n"));
31949#endif /* OBJ_ELF */
31950}
31951
31952#ifdef OBJ_ELF
31953
/* Pair of an EABI Tag_CPU_arch value and the architecture feature set it
   corresponds to; see cpu_arch_ver below.  */
typedef struct
{
  int val;		/* TAG_CPU_ARCH_* value, -1 terminates the table.  */
  arm_feature_set flags;	/* Architecture feature set for this tag.  */
} cpu_arch_ver_table;
31959
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   chronologically for architectures, with an exception for ARMv6-M and
   ARMv6S-M due to legacy reasons.  No new architecture should have a
   special case.  This allows for build attribute selection results to be
   stable when new architectures are added.  Note that several feature sets
   can map to the same tag (e.g. all Armv8-A dot releases share
   TAG_CPU_ARCH_V8).  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {TAG_CPU_ARCH_PRE_V4,   ARM_ARCH_V1},
    {TAG_CPU_ARCH_PRE_V4,   ARM_ARCH_V2},
    {TAG_CPU_ARCH_PRE_V4,   ARM_ARCH_V2S},
    {TAG_CPU_ARCH_PRE_V4,   ARM_ARCH_V3},
    {TAG_CPU_ARCH_PRE_V4,   ARM_ARCH_V3M},
    {TAG_CPU_ARCH_V4,	    ARM_ARCH_V4xM},
    {TAG_CPU_ARCH_V4,	    ARM_ARCH_V4},
    {TAG_CPU_ARCH_V4T,	    ARM_ARCH_V4TxM},
    {TAG_CPU_ARCH_V4T,	    ARM_ARCH_V4T},
    {TAG_CPU_ARCH_V5T,	    ARM_ARCH_V5xM},
    {TAG_CPU_ARCH_V5T,	    ARM_ARCH_V5},
    {TAG_CPU_ARCH_V5T,	    ARM_ARCH_V5TxM},
    {TAG_CPU_ARCH_V5T,	    ARM_ARCH_V5T},
    {TAG_CPU_ARCH_V5TE,	    ARM_ARCH_V5TExP},
    {TAG_CPU_ARCH_V5TE,	    ARM_ARCH_V5TE},
    {TAG_CPU_ARCH_V5TEJ,    ARM_ARCH_V5TEJ},
    {TAG_CPU_ARCH_V6,	    ARM_ARCH_V6},
    {TAG_CPU_ARCH_V6KZ,	    ARM_ARCH_V6Z},
    {TAG_CPU_ARCH_V6KZ,	    ARM_ARCH_V6KZ},
    {TAG_CPU_ARCH_V6K,	    ARM_ARCH_V6K},
    {TAG_CPU_ARCH_V6T2,	    ARM_ARCH_V6T2},
    {TAG_CPU_ARCH_V6T2,	    ARM_ARCH_V6KT2},
    {TAG_CPU_ARCH_V6T2,	    ARM_ARCH_V6ZT2},
    {TAG_CPU_ARCH_V6T2,	    ARM_ARCH_V6KZT2},

    /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
       always selected build attributes to match those of ARMv6-M
       (resp. ARMv6S-M).  However, due to these architectures being a strict
       subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
       would be selected when fully respecting chronology of architectures.
       It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
       move them before ARMv7 architectures.  */
    {TAG_CPU_ARCH_V6_M,	    ARM_ARCH_V6M},
    {TAG_CPU_ARCH_V6S_M,    ARM_ARCH_V6SM},

    {TAG_CPU_ARCH_V7,	    ARM_ARCH_V7},
    {TAG_CPU_ARCH_V7,	    ARM_ARCH_V7A},
    {TAG_CPU_ARCH_V7,	    ARM_ARCH_V7R},
    {TAG_CPU_ARCH_V7,	    ARM_ARCH_V7M},
    {TAG_CPU_ARCH_V7,	    ARM_ARCH_V7VE},
    {TAG_CPU_ARCH_V7E_M,    ARM_ARCH_V7EM},
    {TAG_CPU_ARCH_V8,	    ARM_ARCH_V8A},
    {TAG_CPU_ARCH_V8,	    ARM_ARCH_V8_1A},
    {TAG_CPU_ARCH_V8,	    ARM_ARCH_V8_2A},
    {TAG_CPU_ARCH_V8,	    ARM_ARCH_V8_3A},
    {TAG_CPU_ARCH_V8M_BASE, ARM_ARCH_V8M_BASE},
    {TAG_CPU_ARCH_V8M_MAIN, ARM_ARCH_V8M_MAIN},
    {TAG_CPU_ARCH_V8R,	    ARM_ARCH_V8R},
    {TAG_CPU_ARCH_V8,	    ARM_ARCH_V8_4A},
    {TAG_CPU_ARCH_V8,	    ARM_ARCH_V8_5A},
    {TAG_CPU_ARCH_V8_1M_MAIN, ARM_ARCH_V8_1M_MAIN},
    {TAG_CPU_ARCH_V8,	    ARM_ARCH_V8_6A},
    {-1,		    ARM_ARCH_NONE}
};
32021
32022/* Set an attribute if it has not already been set by the user. */
32023
32024static void
32025aeabi_set_attribute_int (int tag, int value)
32026{
32027 if (tag < 1
32028 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
32029 || !attributes_set_explicitly[tag])
32030 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
32031}
32032
32033static void
32034aeabi_set_attribute_string (int tag, const char *value)
32035{
32036 if (tag < 1
32037 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
32038 || !attributes_set_explicitly[tag])
32039 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
32040}
32041
/* Return whether features in the *NEEDED feature set are available via
   extensions for the architecture whose feature set is *ARCH_FSET.
   Accumulates into EXT_FSET every feature reachable through the legacy
   arm_extensions table for that architecture, then checks that *NEEDED is
   a subset of the accumulated set.  */

static bfd_boolean
have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
			    const arm_feature_set *needed)
{
  int i, nb_allowed_archs;
  arm_feature_set ext_fset;
  const struct arm_option_extension_value_table *opt;

  ext_fset = arm_arch_none;
  for (opt = arm_extensions; opt->name != NULL; opt++)
    {
      /* Extension does not provide any feature we need.  */
      if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
	continue;

      nb_allowed_archs =
	sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
      for (i = 0; i < nb_allowed_archs; i++)
	{
	  /* Empty entry.  */
	  if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
	    break;

	  /* Extension is available, add it.  */
	  if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
	    ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
	}
    }

  /* Can we enable all features in *needed?  */
  return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
}
32077
32078/* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
32079 a given architecture feature set *ARCH_EXT_FSET including extension feature
32080 set *EXT_FSET. Selection logic used depend on EXACT_MATCH:
32081 - if true, check for an exact match of the architecture modulo extensions;
32082 - otherwise, select build attribute value of the first superset
32083 architecture released so that results remains stable when new architectures
32084 are added.
32085 For -march/-mcpu=all the build attribute value of the most featureful
32086 architecture is returned. Tag_CPU_arch_profile result is returned in
32087 PROFILE. */
32088
static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
			      const arm_feature_set *ext_fset,
			      char *profile, int exact_match)
{
  arm_feature_set arch_fset;
  const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;

  /* Select most featureful architecture with all its extensions if building
     for -march=all as the feature sets used to set build attributes.  */
  if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
    {
      /* Force revisiting of decision for each new architecture.  */
      gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8_1M_MAIN);
      *profile = 'A';
      return TAG_CPU_ARCH_V8;
    }

  /* Feature set of the base architecture alone, extensions removed.  */
  ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);

  /* cpu_arch_ver is ordered by release (with ARMv6-M/ARMv6S-M hoisted
     before ARMv7 — see the comment on the table), so the first match is
     the earliest suitable architecture.  */
  for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
    {
      arm_feature_set known_arch_fset;

      /* Compare architectural feature bits only, never FPU bits.  */
      ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
      if (exact_match)
	{
	  /* Base architecture match user-specified architecture and
	     extensions, eg. ARMv6S-M matching -march=armv6-m+os.  */
	  if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
	    {
	      p_ver_ret = p_ver;
	      goto found;
	    }
	  /* Base architecture match user-specified architecture only
	     (eg. ARMv6-M in the same case as above).  Record it in case we
	     find a match with above condition.  */
	  else if (p_ver_ret == NULL
		   && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
	    p_ver_ret = p_ver;
	}
      else
	{

	  /* Architecture has all features wanted.  */
	  if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
	    {
	      arm_feature_set added_fset;

	      /* Compute features added by this architecture over the one
		 recorded in p_ver_ret.  */
	      if (p_ver_ret != NULL)
		ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
				   p_ver_ret->flags);
	      /* First architecture that match incl. with extensions, or the
		 only difference in features over the recorded match is
		 features that were optional and are now mandatory.  */
	      if (p_ver_ret == NULL
		  || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
		{
		  p_ver_ret = p_ver;
		  goto found;
		}
	    }
	  else if (p_ver_ret == NULL)
	    {
	      arm_feature_set needed_ext_fset;

	      /* Features wanted that this architecture does not provide.  */
	      ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);

	      /* Architecture has all features needed when using some
		 extensions.  Record it and continue searching in case there
		 exist an architecture providing all needed features without
		 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
		 OS extension).  */
	      if (have_ext_for_needed_feat_p (&known_arch_fset,
					      &needed_ext_fset))
		p_ver_ret = p_ver;
	    }
	}
    }

  if (p_ver_ret == NULL)
    return -1;

found:
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only)))
    *profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r))
    *profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
    *profile = 'M';
  else
    *profile = '\0';
  return p_ver_ret->val;
}
32189
32190/* Set the public EABI object attributes. */
32191
/* Compute and record the public EABI build attributes (Tag_*) describing
   the architecture, FPU and extensions this object was assembled for.
   Runs after relaxation (arm_md_post_relax) and again at end of assembly
   (arm_md_end); aeabi_set_attribute_* skips tags the user set
   explicitly.  */

static void
aeabi_set_public_attributes (void)
{
  char profile = '\0';
  int arch = -1;
  int virt_sec = 0;
  int fp16_optional = 0;
  int skip_exact_match = 0;
  arm_feature_set flags, flags_arch, flags_ext;

  /* Autodetection mode, choose the architecture based the instructions
     actually used.  */
  if (no_cpu_selected ())
    {
      ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);

      if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

      if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

      /* Code run during relaxation relies on selected_cpu being set.  */
      ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
      flags_ext = arm_arch_none;
      ARM_CLEAR_FEATURE (selected_arch, flags_arch, flags_ext);
      selected_ext = flags_ext;
      selected_cpu = flags;
    }
  /* Otherwise, choose the architecture based on the capabilities of the
     requested cpu.  */
  else
    {
      ARM_MERGE_FEATURE_SETS (flags_arch, selected_arch, selected_ext);
      ARM_CLEAR_FEATURE (flags_arch, flags_arch, fpu_any);
      flags_ext = selected_ext;
      flags = selected_cpu;
    }
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_fpu);

  /* Allow the user to override the reported architecture.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch))
    {
      ARM_CLEAR_FEATURE (flags_arch, selected_object_arch, fpu_any);
      flags_ext = arm_arch_none;
    }
  else
    skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);

  /* When this function is run again after relaxation has happened there is no
     way to determine whether an architecture or CPU was specified by the user:
     - selected_cpu is set above for relaxation to work;
     - march_cpu_opt is not set if only -mcpu or .cpu is used;
     - mcpu_cpu_opt is set to arm_arch_any for autodetection.
     Therefore, if not in -march=all case we first try an exact match and fall
     back to autodetection.  */
  if (!skip_exact_match)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
  if (arch == -1)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
  if (arch == -1)
    as_bad (_("no architecture contains all the instructions used\n"));

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* For architecture names, strip the leading "armv" and upper-case
	 the remainder, eg. "armv7-a" is reported as "7-A".  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (ARM_CPU_HAS_FEATURE (selected_ext, arm_ext_dsp))
    aeabi_set_attribute_int (Tag_DSP_extension, 1);

  /* flags_arch now holds the used feature set minus FPU bits.  */
  ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || ARM_FEATURE_ZERO (flags_arch))
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || ARM_FEATURE_ZERO (flags_arch))
    {
      int thumb_isa_use;

      /* 3: M-profile-only Thumb; 2: Thumb-2; 1: original Thumb.  */
      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  Checked from newest FPU down so the first hit is the
     most featureful one present.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_MVE_arch: 2 for MVE with floating-point, 1 for integer MVE.  */
  if (ARM_CPU_HAS_FEATURE (flags, mve_fp_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, mve_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 1);

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  Bit 0: security extensions; bit 1:
     virtualization extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);

  /* Record a non-default FP16 format selected via -mfp16-format.  */
  if (fp16_format != ARM_FP16_FORMAT_DEFAULT)
    aeabi_set_attribute_int (Tag_ABI_FP_16bit_format, fp16_format);
}
32403
32404/* Post relaxation hook. Recompute ARM attributes now that relaxation is
32405 finished and free extension feature bits which will not be used anymore. */
32406
32407void
32408arm_md_post_relax (void)
32409{
32410 aeabi_set_public_attributes ();
32411 XDELETE (mcpu_ext_opt);
32412 mcpu_ext_opt = NULL;
32413 XDELETE (march_ext_opt);
32414 march_ext_opt = NULL;
32415}
32416
32417/* Add the default contents for the .ARM.attributes section. */
32418
32419void
32420arm_md_end (void)
32421{
32422 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
32423 return;
32424
32425 aeabi_set_public_attributes ();
32426}
32427#endif /* OBJ_ELF */
32428
32429/* Parse a .cpu directive. */
32430
32431static void
32432s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
32433{
32434 const struct arm_cpu_option_table *opt;
32435 char *name;
32436 char saved_char;
32437
32438 name = input_line_pointer;
32439 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
32440 input_line_pointer++;
32441 saved_char = *input_line_pointer;
32442 *input_line_pointer = 0;
32443
32444 /* Skip the first "all" entry. */
32445 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
32446 if (streq (opt->name, name))
32447 {
32448 selected_arch = opt->value;
32449 selected_ext = opt->ext;
32450 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
32451 if (opt->canonical_name)
32452 strcpy (selected_cpu_name, opt->canonical_name);
32453 else
32454 {
32455 int i;
32456 for (i = 0; opt->name[i]; i++)
32457 selected_cpu_name[i] = TOUPPER (opt->name[i]);
32458
32459 selected_cpu_name[i] = 0;
32460 }
32461 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
32462
32463 *input_line_pointer = saved_char;
32464 demand_empty_rest_of_line ();
32465 return;
32466 }
32467 as_bad (_("unknown cpu `%s'"), name);
32468 *input_line_pointer = saved_char;
32469 ignore_rest_of_line ();
32470}
32471
32472/* Parse a .arch directive. */
32473
32474static void
32475s_arm_arch (int ignored ATTRIBUTE_UNUSED)
32476{
32477 const struct arm_arch_option_table *opt;
32478 char saved_char;
32479 char *name;
32480
32481 name = input_line_pointer;
32482 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
32483 input_line_pointer++;
32484 saved_char = *input_line_pointer;
32485 *input_line_pointer = 0;
32486
32487 /* Skip the first "all" entry. */
32488 for (opt = arm_archs + 1; opt->name != NULL; opt++)
32489 if (streq (opt->name, name))
32490 {
32491 selected_arch = opt->value;
32492 selected_ext = arm_arch_none;
32493 selected_cpu = selected_arch;
32494 strcpy (selected_cpu_name, opt->name);
32495 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
32496 *input_line_pointer = saved_char;
32497 demand_empty_rest_of_line ();
32498 return;
32499 }
32500
32501 as_bad (_("unknown architecture `%s'\n"), name);
32502 *input_line_pointer = saved_char;
32503 ignore_rest_of_line ();
32504}
32505
32506/* Parse a .object_arch directive. */
32507
32508static void
32509s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
32510{
32511 const struct arm_arch_option_table *opt;
32512 char saved_char;
32513 char *name;
32514
32515 name = input_line_pointer;
32516 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
32517 input_line_pointer++;
32518 saved_char = *input_line_pointer;
32519 *input_line_pointer = 0;
32520
32521 /* Skip the first "all" entry. */
32522 for (opt = arm_archs + 1; opt->name != NULL; opt++)
32523 if (streq (opt->name, name))
32524 {
32525 selected_object_arch = opt->value;
32526 *input_line_pointer = saved_char;
32527 demand_empty_rest_of_line ();
32528 return;
32529 }
32530
32531 as_bad (_("unknown architecture `%s'\n"), name);
32532 *input_line_pointer = saved_char;
32533 ignore_rest_of_line ();
32534}
32535
32536/* Parse a .arch_extension directive. */
32537
32538static void
32539s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
32540{
32541 const struct arm_option_extension_value_table *opt;
32542 char saved_char;
32543 char *name;
32544 int adding_value = 1;
32545
32546 name = input_line_pointer;
32547 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
32548 input_line_pointer++;
32549 saved_char = *input_line_pointer;
32550 *input_line_pointer = 0;
32551
32552 if (strlen (name) >= 2
32553 && strncmp (name, "no", 2) == 0)
32554 {
32555 adding_value = 0;
32556 name += 2;
32557 }
32558
32559 /* Check the context specific extension table */
32560 if (selected_ctx_ext_table)
32561 {
32562 const struct arm_ext_table * ext_opt;
32563 for (ext_opt = selected_ctx_ext_table; ext_opt->name != NULL; ext_opt++)
32564 {
32565 if (streq (ext_opt->name, name))
32566 {
32567 if (adding_value)
32568 {
32569 if (ARM_FEATURE_ZERO (ext_opt->merge))
32570 /* TODO: Option not supported. When we remove the
32571 legacy table this case should error out. */
32572 continue;
32573 ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
32574 ext_opt->merge);
32575 }
32576 else
32577 ARM_CLEAR_FEATURE (selected_ext, selected_ext, ext_opt->clear);
32578
32579 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
32580 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
32581 *input_line_pointer = saved_char;
32582 demand_empty_rest_of_line ();
32583 return;
32584 }
32585 }
32586 }
32587
32588 for (opt = arm_extensions; opt->name != NULL; opt++)
32589 if (streq (opt->name, name))
32590 {
32591 int i, nb_allowed_archs =
32592 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
32593 for (i = 0; i < nb_allowed_archs; i++)
32594 {
32595 /* Empty entry. */
32596 if (ARM_CPU_IS_ANY (opt->allowed_archs[i]))
32597 continue;
32598 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], selected_arch))
32599 break;
32600 }
32601
32602 if (i == nb_allowed_archs)
32603 {
32604 as_bad (_("architectural extension `%s' is not allowed for the "
32605 "current base architecture"), name);
32606 break;
32607 }
32608
32609 if (adding_value)
32610 ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
32611 opt->merge_value);
32612 else
32613 ARM_CLEAR_FEATURE (selected_ext, selected_ext, opt->clear_value);
32614
32615 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
32616 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
32617 *input_line_pointer = saved_char;
32618 demand_empty_rest_of_line ();
32619 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
32620 on this return so that duplicate extensions (extensions with the
32621 same name as a previous extension in the list) are not considered
32622 for command-line parsing. */
32623 return;
32624 }
32625
32626 if (opt->name == NULL)
32627 as_bad (_("unknown architecture extension `%s'\n"), name);
32628
32629 *input_line_pointer = saved_char;
32630 ignore_rest_of_line ();
32631}
32632
32633/* Parse a .fpu directive. */
32634
32635static void
32636s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
32637{
32638 const struct arm_option_fpu_value_table *opt;
32639 char saved_char;
32640 char *name;
32641
32642 name = input_line_pointer;
32643 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
32644 input_line_pointer++;
32645 saved_char = *input_line_pointer;
32646 *input_line_pointer = 0;
32647
32648 for (opt = arm_fpus; opt->name != NULL; opt++)
32649 if (streq (opt->name, name))
32650 {
32651 selected_fpu = opt->value;
32652#ifndef CPU_DEFAULT
32653 if (no_cpu_selected ())
32654 ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
32655 else
32656#endif
32657 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
32658 *input_line_pointer = saved_char;
32659 demand_empty_rest_of_line ();
32660 return;
32661 }
32662
32663 as_bad (_("unknown floating point format `%s'\n"), name);
32664 *input_line_pointer = saved_char;
32665 ignore_rest_of_line ();
32666}
32667
32668/* Copy symbol information. */
32669
void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  /* Propagate the ARM-specific symbol flag word from SRC to DEST.  */
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
32675
32676#ifdef OBJ_ELF
32677/* Given a symbolic attribute NAME, return the proper integer value.
32678 Returns -1 if the attribute is not known. */
32679
32680int
32681arm_convert_symbolic_attribute (const char *name)
32682{
32683 static const struct
32684 {
32685 const char * name;
32686 const int tag;
32687 }
32688 attribute_table[] =
32689 {
32690 /* When you modify this table you should
32691 also modify the list in doc/c-arm.texi. */
32692#define T(tag) {#tag, tag}
32693 T (Tag_CPU_raw_name),
32694 T (Tag_CPU_name),
32695 T (Tag_CPU_arch),
32696 T (Tag_CPU_arch_profile),
32697 T (Tag_ARM_ISA_use),
32698 T (Tag_THUMB_ISA_use),
32699 T (Tag_FP_arch),
32700 T (Tag_VFP_arch),
32701 T (Tag_WMMX_arch),
32702 T (Tag_Advanced_SIMD_arch),
32703 T (Tag_PCS_config),
32704 T (Tag_ABI_PCS_R9_use),
32705 T (Tag_ABI_PCS_RW_data),
32706 T (Tag_ABI_PCS_RO_data),
32707 T (Tag_ABI_PCS_GOT_use),
32708 T (Tag_ABI_PCS_wchar_t),
32709 T (Tag_ABI_FP_rounding),
32710 T (Tag_ABI_FP_denormal),
32711 T (Tag_ABI_FP_exceptions),
32712 T (Tag_ABI_FP_user_exceptions),
32713 T (Tag_ABI_FP_number_model),
32714 T (Tag_ABI_align_needed),
32715 T (Tag_ABI_align8_needed),
32716 T (Tag_ABI_align_preserved),
32717 T (Tag_ABI_align8_preserved),
32718 T (Tag_ABI_enum_size),
32719 T (Tag_ABI_HardFP_use),
32720 T (Tag_ABI_VFP_args),
32721 T (Tag_ABI_WMMX_args),
32722 T (Tag_ABI_optimization_goals),
32723 T (Tag_ABI_FP_optimization_goals),
32724 T (Tag_compatibility),
32725 T (Tag_CPU_unaligned_access),
32726 T (Tag_FP_HP_extension),
32727 T (Tag_VFP_HP_extension),
32728 T (Tag_ABI_FP_16bit_format),
32729 T (Tag_MPextension_use),
32730 T (Tag_DIV_use),
32731 T (Tag_nodefaults),
32732 T (Tag_also_compatible_with),
32733 T (Tag_conformance),
32734 T (Tag_T2EE_use),
32735 T (Tag_Virtualization_use),
32736 T (Tag_DSP_extension),
32737 T (Tag_MVE_arch),
32738 /* We deliberately do not include Tag_MPextension_use_legacy. */
32739#undef T
32740 };
32741 unsigned int i;
32742
32743 if (name == NULL)
32744 return -1;
32745
32746 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
32747 if (streq (name, attribute_table[i].name))
32748 return attribute_table[i].tag;
32749
32750 return -1;
32751}
32752
32753/* Apply sym value for relocations only in the case that they are for
32754 local symbols in the same segment as the fixup and you have the
32755 respective architectural feature for blx and simple switches. */
32756
32757int
32758arm_apply_sym_value (struct fix * fixP, segT this_seg)
32759{
32760 if (fixP->fx_addsy
32761 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
32762 /* PR 17444: If the local symbol is in a different section then a reloc
32763 will always be generated for it, so applying the symbol value now
32764 will result in a double offset being stored in the relocation. */
32765 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
32766 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
32767 {
32768 switch (fixP->fx_r_type)
32769 {
32770 case BFD_RELOC_ARM_PCREL_BLX:
32771 case BFD_RELOC_THUMB_PCREL_BRANCH23:
32772 if (ARM_IS_FUNC (fixP->fx_addsy))
32773 return 1;
32774 break;
32775
32776 case BFD_RELOC_ARM_PCREL_CALL:
32777 case BFD_RELOC_THUMB_PCREL_BLX:
32778 if (THUMB_IS_FUNC (fixP->fx_addsy))
32779 return 1;
32780 break;
32781
32782 default:
32783 break;
32784 }
32785
32786 }
32787 return 0;
32788}
32789#endif /* OBJ_ELF */
This page took 0.144953 seconds and 4 git commands to generate.