[PATCH 8/57][Arm][GAS] Add support for MVE instructions: vcvt
[deliverable/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2019 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
47 /* This structure holds the unwinding state. */
48
49 static struct
50 {
51 symbolS * proc_start;	/* Start symbol of the current function (presumably set by .fnstart -- confirm).  */
52 symbolS * table_entry;	/* Symbol for this function's unwind table entry.  */
53 symbolS * personality_routine;	/* Explicitly named personality routine, if any.  */
54 int personality_index;	/* Index of a predefined personality routine (sentinel value not visible here).  */
55 /* The segment containing the function. */
56 segT saved_seg;
57 subsegT saved_subseg;
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes;	/* Buffer of unwind opcode bytes.  */
60 int opcode_count;	/* Bytes used in OPCODES.  */
61 int opcode_alloc;	/* Bytes allocated for OPCODES.  */
62 /* The number of bytes pushed to the stack. */
63 offsetT frame_size;
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
70 offsetT fp_offset;
71 int fp_reg;
72 /* Nonzero if an unwind_setfp directive has been seen. */
73 unsigned fp_used:1;
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored:1;
76 } unwind;
77
78 /* Whether --fdpic was given. */
79 static int arm_fdpic;
80
81 #endif /* OBJ_ELF */
82
83 /* Results from operand parsing worker functions. */
84
85 typedef enum
86 {
87 PARSE_OPERAND_SUCCESS,	/* Operand parsed successfully.  */
88 PARSE_OPERAND_FAIL,	/* Parse failed; the caller may backtrack and retry.  */
89 PARSE_OPERAND_FAIL_NO_BACKTRACK	/* Parse failed; no alternative parse should be attempted.  */
90 } parse_operand_result;
91
92 enum arm_float_abi
93 {
94 ARM_FLOAT_ABI_HARD,	/* FP hardware, FP-register calling convention.  */
95 ARM_FLOAT_ABI_SOFTFP,	/* FP hardware, but soft-float calling convention.  */
96 ARM_FLOAT_ABI_SOFT	/* No FP hardware; floating point done in software.  */
97 };
98
99 /* Types of processor to assemble for. */
100 #ifndef CPU_DEFAULT
101 /* The code that was here used to select a default CPU depending on compiler
102 pre-defines which were only present when doing native builds, thus
103 changing gas' default behaviour depending upon the build host.
104
105 If you have a target that requires a default CPU option then you
106 should define CPU_DEFAULT here. */
107 #endif
108
109 #ifndef FPU_DEFAULT
110 # ifdef TE_LINUX
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
113 # ifdef OBJ_ELF
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
115 # else
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
118 # endif
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
121 # else
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
124 # endif
125 #endif /* ifndef FPU_DEFAULT */
126
127 #define streq(a, b) (strcmp (a, b) == 0)
128
129 /* Current set of feature bits available (CPU+FPU). Different from
130 selected_cpu + selected_fpu in case of autodetection since the CPU
131 feature bits are then all set. */
132 static arm_feature_set cpu_variant;
133 /* Feature bits used in each execution state. Used to set build attribute
134 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
135 static arm_feature_set arm_arch_used;
136 static arm_feature_set thumb_arch_used;
137
138 /* Flags stored in private area of BFD structure. */
139 static int uses_apcs_26 = FALSE;
140 static int atpcs = FALSE;
141 static int support_interwork = FALSE;
142 static int uses_apcs_float = FALSE;
143 static int pic_code = FALSE;
144 static int fix_v4bx = FALSE;
145 /* Warn on using deprecated features. */
146 static int warn_on_deprecated = TRUE;
147
148 /* Understand CodeComposer Studio assembly syntax. */
149 bfd_boolean codecomposer_syntax = FALSE;
150
151 /* Variables that we set while parsing command-line options. Once all
152 options have been read we re-process these values to set the real
153 assembly flags. */
154
155 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
156 instead of -mcpu=arm1). */
157 static const arm_feature_set *legacy_cpu = NULL;
158 static const arm_feature_set *legacy_fpu = NULL;
159
160 /* CPU, extension and FPU feature bits selected by -mcpu. */
161 static const arm_feature_set *mcpu_cpu_opt = NULL;
162 static arm_feature_set *mcpu_ext_opt = NULL;
163 static const arm_feature_set *mcpu_fpu_opt = NULL;
164
165 /* CPU, extension and FPU feature bits selected by -march. */
166 static const arm_feature_set *march_cpu_opt = NULL;
167 static arm_feature_set *march_ext_opt = NULL;
168 static const arm_feature_set *march_fpu_opt = NULL;
169
170 /* Feature bits selected by -mfpu. */
171 static const arm_feature_set *mfpu_opt = NULL;
172
173 /* Constants for known architecture features. */
174 static const arm_feature_set fpu_default = FPU_DEFAULT;
175 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
176 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
177 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
178 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
179 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
180 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
181 #ifdef OBJ_ELF
182 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
183 #endif
184 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
185
186 #ifdef CPU_DEFAULT
187 static const arm_feature_set cpu_default = CPU_DEFAULT;
188 #endif
189
190 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
191 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
192 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
193 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
194 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
195 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
196 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
197 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
198 static const arm_feature_set arm_ext_v4t_5 =
199 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
200 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
201 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
202 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
203 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
204 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
205 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
206 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
207 /* Only for compatibility of hint instructions. */
208 static const arm_feature_set arm_ext_v6k_v6t2 =
209 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V6T2);
210 static const arm_feature_set arm_ext_v6_notm =
211 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
212 static const arm_feature_set arm_ext_v6_dsp =
213 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
214 static const arm_feature_set arm_ext_barrier =
215 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
216 static const arm_feature_set arm_ext_msr =
217 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
218 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
219 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
220 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
221 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
222 #ifdef OBJ_ELF
223 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
224 #endif
225 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
226 static const arm_feature_set arm_ext_m =
227 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
228 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
229 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
230 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
231 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
232 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
233 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
234 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
235 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
236 static const arm_feature_set arm_ext_v8m_main =
237 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
238 static const arm_feature_set arm_ext_v8_1m_main =
239 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
240 /* Instructions in ARMv8-M only found in M profile architectures. */
241 static const arm_feature_set arm_ext_v8m_m_only =
242 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
243 static const arm_feature_set arm_ext_v6t2_v8m =
244 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
245 /* Instructions shared between ARMv8-A and ARMv8-M. */
246 static const arm_feature_set arm_ext_atomics =
247 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
248 #ifdef OBJ_ELF
249 /* DSP instructions Tag_DSP_extension refers to. */
250 static const arm_feature_set arm_ext_dsp =
251 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
252 #endif
253 static const arm_feature_set arm_ext_ras =
254 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
255 /* FP16 instructions. */
256 static const arm_feature_set arm_ext_fp16 =
257 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
258 static const arm_feature_set arm_ext_fp16_fml =
259 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
260 static const arm_feature_set arm_ext_v8_2 =
261 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
262 static const arm_feature_set arm_ext_v8_3 =
263 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
264 static const arm_feature_set arm_ext_sb =
265 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
266 static const arm_feature_set arm_ext_predres =
267 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);
268
269 static const arm_feature_set arm_arch_any = ARM_ANY;
270 #ifdef OBJ_ELF
271 static const arm_feature_set fpu_any = FPU_ANY;
272 #endif
273 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
274 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
275 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
276
277 static const arm_feature_set arm_cext_iwmmxt2 =
278 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
279 static const arm_feature_set arm_cext_iwmmxt =
280 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
281 static const arm_feature_set arm_cext_xscale =
282 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
283 static const arm_feature_set arm_cext_maverick =
284 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
285 static const arm_feature_set fpu_fpa_ext_v1 =
286 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
287 static const arm_feature_set fpu_fpa_ext_v2 =
288 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
289 static const arm_feature_set fpu_vfp_ext_v1xd =
290 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
291 static const arm_feature_set fpu_vfp_ext_v1 =
292 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
293 static const arm_feature_set fpu_vfp_ext_v2 =
294 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
295 static const arm_feature_set fpu_vfp_ext_v3xd =
296 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
297 static const arm_feature_set fpu_vfp_ext_v3 =
298 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
299 static const arm_feature_set fpu_vfp_ext_d32 =
300 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
301 static const arm_feature_set fpu_neon_ext_v1 =
302 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
303 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
304 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
305 static const arm_feature_set mve_ext =
306 ARM_FEATURE_COPROC (FPU_MVE);
307 static const arm_feature_set mve_fp_ext =
308 ARM_FEATURE_COPROC (FPU_MVE_FP);
309 #ifdef OBJ_ELF
310 static const arm_feature_set fpu_vfp_fp16 =
311 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
312 static const arm_feature_set fpu_neon_ext_fma =
313 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
314 #endif
315 static const arm_feature_set fpu_vfp_ext_fma =
316 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
317 static const arm_feature_set fpu_vfp_ext_armv8 =
318 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
319 static const arm_feature_set fpu_vfp_ext_armv8xd =
320 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
321 static const arm_feature_set fpu_neon_ext_armv8 =
322 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
323 static const arm_feature_set fpu_crypto_ext_armv8 =
324 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
325 static const arm_feature_set crc_ext_armv8 =
326 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
327 static const arm_feature_set fpu_neon_ext_v8_1 =
328 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
329 static const arm_feature_set fpu_neon_ext_dotprod =
330 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
331
332 static int mfloat_abi_opt = -1;
333 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
334 directive. */
335 static arm_feature_set selected_arch = ARM_ARCH_NONE;
336 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
337 directive. */
338 static arm_feature_set selected_ext = ARM_ARCH_NONE;
339 /* Feature bits selected by the last -mcpu/-march or by the combination of the
340 last .cpu/.arch directive and the .arch_extension directives seen since that
341 directive. */
342 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
343 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
344 static arm_feature_set selected_fpu = FPU_NONE;
345 /* Feature bits selected by the last .object_arch directive. */
346 static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
347 /* Must be long enough to hold any of the names in arm_cpus. */
348 static char selected_cpu_name[20];
349
350 extern FLONUM_TYPE generic_floating_point_number;
351
352 /* Return if no cpu was selected on command-line. */
353 static bfd_boolean
354 no_cpu_selected (void)
355 {
356 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
357 }
358
359 #ifdef OBJ_ELF
360 # ifdef EABI_DEFAULT
361 static int meabi_flags = EABI_DEFAULT;
362 # else
363 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
364 # endif
365
366 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
367
368 bfd_boolean
369 arm_is_eabi (void)
370 {
371 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
372 }
373 #endif
374
375 #ifdef OBJ_ELF
376 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
377 symbolS * GOT_symbol;
378 #endif
379
380 /* 0: assemble for ARM,
381 1: assemble for Thumb,
382 2: assemble for Thumb even though target CPU does not support thumb
383 instructions. */
384 static int thumb_mode = 0;
385 /* A value distinct from the possible values for thumb_mode that we
386 can use to record whether thumb_mode has been copied into the
387 tc_frag_data field of a frag. */
388 #define MODE_RECORDED (1 << 4)
389
390 /* Specifies the intrinsic IT insn behavior mode. */
391 enum implicit_it_mode
392 {
393 IMPLICIT_IT_MODE_NEVER = 0x00,
394 IMPLICIT_IT_MODE_ARM = 0x01,
395 IMPLICIT_IT_MODE_THUMB = 0x02,
396 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
397 };
398 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
399
400 /* If unified_syntax is true, we are processing the new unified
401 ARM/Thumb syntax. Important differences from the old ARM mode:
402
403 - Immediate operands do not require a # prefix.
404 - Conditional affixes always appear at the end of the
405 instruction. (For backward compatibility, those instructions
406 that formerly had them in the middle, continue to accept them
407 there.)
408 - The IT instruction may appear, and if it does is validated
409 against subsequent conditional affixes. It does not generate
410 machine code.
411
412 Important differences from the old Thumb mode:
413
414 - Immediate operands do not require a # prefix.
415 - Most of the V6T2 instructions are only available in unified mode.
416 - The .N and .W suffixes are recognized and honored (it is an error
417 if they cannot be honored).
418 - All instructions set the flags if and only if they have an 's' affix.
419 - Conditional affixes may be used. They are validated against
420 preceding IT instructions. Unlike ARM mode, you cannot use a
421 conditional affix except in the scope of an IT instruction. */
422
423 static bfd_boolean unified_syntax = FALSE;
424
425 /* An immediate operand can start with #, and ld*, st*, pld operands
426 can contain [ and ]. We need to tell APP not to elide whitespace
427 before a [, which can appear as the first operand for pld.
428 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
429 const char arm_symbol_chars[] = "#[]{}";
430
431 enum neon_el_type
432 {
433 NT_invtype,	/* Invalid/unrecognised element type.  */
434 NT_untyped,	/* No type suffix given.  */
435 NT_integer,	/* Integer element, signedness unspecified.  */
436 NT_float,	/* Floating-point element.  */
437 NT_poly,	/* Polynomial element.  */
438 NT_signed,	/* Signed integer element.  */
439 NT_unsigned	/* Unsigned integer element.  */
440 };
441
442 struct neon_type_el
443 {
444 enum neon_el_type type;
445 unsigned size;
446 };
447
448 #define NEON_MAX_TYPE_ELS 4
449
450 struct neon_type
451 {
452 struct neon_type_el el[NEON_MAX_TYPE_ELS];
453 unsigned elems;
454 };
455
456 enum pred_instruction_type
457 {
458 OUTSIDE_PRED_INSN,	/* Instruction is outside any IT or VPT block.  */
459 INSIDE_VPT_INSN,	/* Instruction is inside a VPT block.  */
460 INSIDE_IT_INSN,	/* Instruction is inside an IT block.  */
461 INSIDE_IT_LAST_INSN,	/* Instruction is the last one of an IT block.  */
462 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
463 if inside, should be the last one. */
464 NEUTRAL_IT_INSN, /* This could be either inside or outside,
465 i.e. BKPT and NOP. */
466 IT_INSN, /* The IT insn has been parsed. */
467 VPT_INSN, /* The VPT/VPST insn has been parsed. */
468 MVE_OUTSIDE_PRED_INSN , /* Instruction to indicate a MVE instruction without
469 a predication code. */
470 MVE_UNPREDICABLE_INSN /* MVE instruction that is non-predicable. */
471 };
472
473 /* The maximum number of operands we need. */
474 #define ARM_IT_MAX_OPERANDS 6
475 #define ARM_IT_MAX_RELOCS 3
476
/* Parsed form of the instruction currently being assembled.  */
477 struct arm_it
478 {
479 const char * error;	/* Diagnostic message (presumably NULL when no error -- confirm at use sites).  */
480 unsigned long instruction;	/* Encoded instruction bits.  */
481 int size;	/* Instruction size.  */
482 int size_req;	/* Size required by the user (presumably via .N/.W suffixes -- confirm).  */
483 int cond;	/* Condition code value.  */
484 /* "uncond_value" is set to the value in place of the conditional field in
485 unconditional versions of the instruction, or -1 if nothing is
486 appropriate. */
487 int uncond_value;
488 struct neon_type vectype;
489 /* This does not indicate an actual NEON instruction, only that
490 the mnemonic accepts neon-style type suffixes. */
491 int is_neon;
492 /* Set to the opcode if the instruction needs relaxation.
493 Zero if the instruction is not relaxed. */
494 unsigned long relax;
495 struct
496 {
497 bfd_reloc_code_real_type type;	/* Relocation type.  */
498 expressionS exp;	/* Expression the relocation applies to.  */
499 int pc_rel;	/* Nonzero for a PC-relative relocation.  */
500 } relocs[ARM_IT_MAX_RELOCS];
501
502 enum pred_instruction_type pred_insn_type;
503
504 struct
505 {
506 unsigned reg;	/* Register number, when isreg is set.  */
507 signed int imm;	/* Immediate value, or a register (see immisreg).  */
508 struct neon_type_el vectype;	/* Neon element type suffix for this operand.  */
509 unsigned present : 1; /* Operand present. */
510 unsigned isreg : 1; /* Operand was a register. */
511 unsigned immisreg : 2; /* .imm field is a second register.
512 0: imm, 1: gpr, 2: MVE Q-register. */
513 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
514 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
515 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
516 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
517 instructions. This allows us to disambiguate ARM <-> vector insns. */
518 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
519 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
520 unsigned isquad : 1; /* Operand is SIMD quad register. */
521 unsigned issingle : 1; /* Operand is VFP single-precision register. */
522 unsigned hasreloc : 1; /* Operand has relocation suffix. */
523 unsigned writeback : 1; /* Operand has trailing ! */
524 unsigned preind : 1; /* Preindexed address. */
525 unsigned postind : 1; /* Postindexed address. */
526 unsigned negative : 1; /* Index register was negated. */
527 unsigned shifted : 1; /* Shift applied to operation. */
528 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
529 } operands[ARM_IT_MAX_OPERANDS];
530 };
531
532 static struct arm_it inst;
533
534 #define NUM_FLOAT_VALS 8
535
536 const char * fp_const[] =
537 {
538 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
539 };
540
541 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
542
543 #define FAIL (-1)
544 #define SUCCESS (0)
545
546 #define SUFF_S 1
547 #define SUFF_D 2
548 #define SUFF_E 3
549 #define SUFF_P 4
550
551 #define CP_T_X 0x00008000
552 #define CP_T_Y 0x00400000
553
554 #define CONDS_BIT 0x00100000
555 #define LOAD_BIT 0x00100000
556
557 #define DOUBLE_LOAD_FLAG 0x00000001
558
559 struct asm_cond
560 {
561 const char * template_name;
562 unsigned long value;
563 };
564
565 #define COND_ALWAYS 0xE
566
567 struct asm_psr
568 {
569 const char * template_name;
570 unsigned long field;
571 };
572
573 struct asm_barrier_opt
574 {
575 const char * template_name;
576 unsigned long value;
577 const arm_feature_set arch;
578 };
579
580 /* The bit that distinguishes CPSR and SPSR. */
581 #define SPSR_BIT (1 << 22)
582
583 /* The individual PSR flag bits. */
584 #define PSR_c (1 << 16)
585 #define PSR_x (1 << 17)
586 #define PSR_s (1 << 18)
587 #define PSR_f (1 << 19)
588
589 struct reloc_entry
590 {
591 const char * name;
592 bfd_reloc_code_real_type reloc;
593 };
594
595 enum vfp_reg_pos
596 {
597 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
598 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
599 };
600
601 enum vfp_ldstm_type
602 {
603 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
604 };
605
606 /* Bits for DEFINED field in neon_typed_alias. */
607 #define NTA_HASTYPE 1
608 #define NTA_HASINDEX 2
609
610 struct neon_typed_alias
611 {
612 unsigned char defined;
613 unsigned char index;
614 struct neon_type_el eltype;
615 };
616
617 /* ARM register categories. This includes coprocessor numbers and various
618 architecture extensions' registers. Each entry should have an error message
619 in reg_expected_msgs below. */
620 enum arm_reg_type
621 {
622 REG_TYPE_RN,
623 REG_TYPE_CP,
624 REG_TYPE_CN,
625 REG_TYPE_FN,
626 REG_TYPE_VFS,
627 REG_TYPE_VFD,
628 REG_TYPE_NQ,
629 REG_TYPE_VFSD,
630 REG_TYPE_NDQ,
631 REG_TYPE_NSD,
632 REG_TYPE_NSDQ,
633 REG_TYPE_VFC,
634 REG_TYPE_MVF,
635 REG_TYPE_MVD,
636 REG_TYPE_MVFX,
637 REG_TYPE_MVDX,
638 REG_TYPE_MVAX,
639 REG_TYPE_MQ,
640 REG_TYPE_DSPSC,
641 REG_TYPE_MMXWR,
642 REG_TYPE_MMXWC,
643 REG_TYPE_MMXWCG,
644 REG_TYPE_XSCALE,
645 REG_TYPE_RNB,
646 };
647
648 /* Structure for a hash table entry for a register.
649 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
650 information which states whether a vector type or index is specified (for a
651 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
652 struct reg_entry
653 {
654 const char * name;
655 unsigned int number;
656 unsigned char type;
657 unsigned char builtin;
658 struct neon_typed_alias * neon;
659 };
660
661 /* Diagnostics used when we don't get a register of the expected type. */
662 const char * const reg_expected_msgs[] =
663 {
664 [REG_TYPE_RN] = N_("ARM register expected"),
665 [REG_TYPE_CP] = N_("bad or missing co-processor number"),
666 [REG_TYPE_CN] = N_("co-processor register expected"),
667 [REG_TYPE_FN] = N_("FPA register expected"),
668 [REG_TYPE_VFS] = N_("VFP single precision register expected"),
669 [REG_TYPE_VFD] = N_("VFP/Neon double precision register expected"),
670 [REG_TYPE_NQ] = N_("Neon quad precision register expected"),
671 [REG_TYPE_VFSD] = N_("VFP single or double precision register expected"),
672 [REG_TYPE_NDQ] = N_("Neon double or quad precision register expected"),
673 [REG_TYPE_NSD] = N_("Neon single or double precision register expected"),
674 [REG_TYPE_NSDQ] = N_("VFP single, double or Neon quad precision register"
675 " expected"),
676 [REG_TYPE_VFC] = N_("VFP system register expected"),
677 [REG_TYPE_MVF] = N_("Maverick MVF register expected"),
678 [REG_TYPE_MVD] = N_("Maverick MVD register expected"),
679 [REG_TYPE_MVFX] = N_("Maverick MVFX register expected"),
680 [REG_TYPE_MVDX] = N_("Maverick MVDX register expected"),
681 [REG_TYPE_MVAX] = N_("Maverick MVAX register expected"),
682 [REG_TYPE_DSPSC] = N_("Maverick DSPSC register expected"),
683 [REG_TYPE_MMXWR] = N_("iWMMXt data register expected"),
684 [REG_TYPE_MMXWC] = N_("iWMMXt control register expected"),
685 [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
686 [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
687 [REG_TYPE_MQ] = N_("MVE vector register expected"),
688 [REG_TYPE_RNB] = N_("")
689 };
690
691 /* Some well known registers that we refer to directly elsewhere. */
692 #define REG_R12 12
693 #define REG_SP 13
694 #define REG_LR 14
695 #define REG_PC 15
696
697 /* ARM instructions take 4bytes in the object file, Thumb instructions
698 take 2: */
699 #define INSN_SIZE 4
700
701 struct asm_opcode
702 {
703 /* Basic string to match. */
704 const char * template_name;
705
706 /* Parameters to instruction. */
707 unsigned int operands[8];
708
709 /* Conditional tag - see opcode_lookup. */
710 unsigned int tag : 4;
711
712 /* Basic instruction code. */
713 unsigned int avalue;
714
715 /* Thumb-format instruction code. */
716 unsigned int tvalue;
717
718 /* Which architecture variant provides this instruction. */
719 const arm_feature_set * avariant;
720 const arm_feature_set * tvariant;
721
722 /* Function to call to encode instruction in ARM format. */
723 void (* aencode) (void);
724
725 /* Function to call to encode instruction in Thumb format. */
726 void (* tencode) (void);
727
728 /* Indicates whether this instruction may be vector predicated. */
729 unsigned int mayBeVecPred : 1;
730 };
731
732 /* Defines for various bits that we will want to toggle. */
733 #define INST_IMMEDIATE 0x02000000
734 #define OFFSET_REG 0x02000000
735 #define HWOFFSET_IMM 0x00400000
736 #define SHIFT_BY_REG 0x00000010
737 #define PRE_INDEX 0x01000000
738 #define INDEX_UP 0x00800000
739 #define WRITE_BACK 0x00200000
740 #define LDM_TYPE_2_OR_3 0x00400000
741 #define CPSI_MMOD 0x00020000
742
743 #define LITERAL_MASK 0xf000f000
744 #define OPCODE_MASK 0xfe1fffff
745 #define V4_STR_BIT 0x00000020
746 #define VLDR_VMOV_SAME 0x0040f000
747
748 #define T2_SUBS_PC_LR 0xf3de8f00
749
750 #define DATA_OP_SHIFT 21
751 #define SBIT_SHIFT 20
752
753 #define T2_OPCODE_MASK 0xfe1fffff
754 #define T2_DATA_OP_SHIFT 21
755 #define T2_SBIT_SHIFT 20
756
757 #define A_COND_MASK 0xf0000000
758 #define A_PUSH_POP_OP_MASK 0x0fff0000
759
760 /* Opcodes for pushing/poping registers to/from the stack. */
761 #define A1_OPCODE_PUSH 0x092d0000
762 #define A2_OPCODE_PUSH 0x052d0004
763 #define A2_OPCODE_POP 0x049d0004
764
765 /* Codes to distinguish the arithmetic instructions. */
766 #define OPCODE_AND 0
767 #define OPCODE_EOR 1
768 #define OPCODE_SUB 2
769 #define OPCODE_RSB 3
770 #define OPCODE_ADD 4
771 #define OPCODE_ADC 5
772 #define OPCODE_SBC 6
773 #define OPCODE_RSC 7
774 #define OPCODE_TST 8
775 #define OPCODE_TEQ 9
776 #define OPCODE_CMP 10
777 #define OPCODE_CMN 11
778 #define OPCODE_ORR 12
779 #define OPCODE_MOV 13
780 #define OPCODE_BIC 14
781 #define OPCODE_MVN 15
782
783 #define T2_OPCODE_AND 0
784 #define T2_OPCODE_BIC 1
785 #define T2_OPCODE_ORR 2
786 #define T2_OPCODE_ORN 3
787 #define T2_OPCODE_EOR 4
788 #define T2_OPCODE_ADD 8
789 #define T2_OPCODE_ADC 10
790 #define T2_OPCODE_SBC 11
791 #define T2_OPCODE_SUB 13
792 #define T2_OPCODE_RSB 14
793
794 #define T_OPCODE_MUL 0x4340
795 #define T_OPCODE_TST 0x4200
796 #define T_OPCODE_CMN 0x42c0
797 #define T_OPCODE_NEG 0x4240
798 #define T_OPCODE_MVN 0x43c0
799
800 #define T_OPCODE_ADD_R3 0x1800
801 #define T_OPCODE_SUB_R3 0x1a00
802 #define T_OPCODE_ADD_HI 0x4400
803 #define T_OPCODE_ADD_ST 0xb000
804 #define T_OPCODE_SUB_ST 0xb080
805 #define T_OPCODE_ADD_SP 0xa800
806 #define T_OPCODE_ADD_PC 0xa000
807 #define T_OPCODE_ADD_I8 0x3000
808 #define T_OPCODE_SUB_I8 0x3800
809 #define T_OPCODE_ADD_I3 0x1c00
810 #define T_OPCODE_SUB_I3 0x1e00
811
812 #define T_OPCODE_ASR_R 0x4100
813 #define T_OPCODE_LSL_R 0x4080
814 #define T_OPCODE_LSR_R 0x40c0
815 #define T_OPCODE_ROR_R 0x41c0
816 #define T_OPCODE_ASR_I 0x1000
817 #define T_OPCODE_LSL_I 0x0000
818 #define T_OPCODE_LSR_I 0x0800
819
820 #define T_OPCODE_MOV_I8 0x2000
821 #define T_OPCODE_CMP_I8 0x2800
822 #define T_OPCODE_CMP_LR 0x4280
823 #define T_OPCODE_MOV_HR 0x4600
824 #define T_OPCODE_CMP_HR 0x4500
825
826 #define T_OPCODE_LDR_PC 0x4800
827 #define T_OPCODE_LDR_SP 0x9800
828 #define T_OPCODE_STR_SP 0x9000
829 #define T_OPCODE_LDR_IW 0x6800
830 #define T_OPCODE_STR_IW 0x6000
831 #define T_OPCODE_LDR_IH 0x8800
832 #define T_OPCODE_STR_IH 0x8000
833 #define T_OPCODE_LDR_IB 0x7800
834 #define T_OPCODE_STR_IB 0x7000
835 #define T_OPCODE_LDR_RW 0x5800
836 #define T_OPCODE_STR_RW 0x5000
837 #define T_OPCODE_LDR_RH 0x5a00
838 #define T_OPCODE_STR_RH 0x5200
839 #define T_OPCODE_LDR_RB 0x5c00
840 #define T_OPCODE_STR_RB 0x5400
841
842 #define T_OPCODE_PUSH 0xb400
843 #define T_OPCODE_POP 0xbc00
844
845 #define T_OPCODE_BRANCH 0xe000
846
847 #define THUMB_SIZE 2 /* Size of thumb instruction. */
848 #define THUMB_PP_PC_LR 0x0100
849 #define THUMB_LOAD_BIT 0x0800
850 #define THUMB2_LOAD_BIT 0x00100000
851
852 #define BAD_SYNTAX _("syntax error")
853 #define BAD_ARGS _("bad arguments to instruction")
854 #define BAD_SP _("r13 not allowed here")
855 #define BAD_PC _("r15 not allowed here")
856 #define BAD_ODD _("Odd register not allowed here")
857 #define BAD_EVEN _("Even register not allowed here")
858 #define BAD_COND _("instruction cannot be conditional")
859 #define BAD_OVERLAP _("registers may not be the same")
860 #define BAD_HIREG _("lo register required")
861 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
862 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
863 #define BAD_BRANCH _("branch must be last instruction in IT block")
864 #define BAD_BRANCH_OFF _("branch out of range or not a multiple of 2")
865 #define BAD_NOT_IT _("instruction not allowed in IT block")
866 #define BAD_NOT_VPT _("instruction missing MVE vector predication code")
867 #define BAD_FPU _("selected FPU does not support instruction")
868 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
869 #define BAD_OUT_VPT \
870 _("vector predicated instruction should be in VPT/VPST block")
871 #define BAD_IT_COND _("incorrect condition in IT block")
872 #define BAD_VPT_COND _("incorrect condition in VPT/VPST block")
873 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
874 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
875 #define BAD_PC_ADDRESSING \
876 _("cannot use register index with PC-relative addressing")
877 #define BAD_PC_WRITEBACK \
878 _("cannot use writeback with PC-relative addressing")
879 #define BAD_RANGE _("branch out of range")
880 #define BAD_FP16 _("selected processor does not support fp16 instruction")
881 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
882 #define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
883 #define MVE_NOT_IT _("Warning: instruction is UNPREDICTABLE in an IT " \
884 "block")
885 #define MVE_NOT_VPT _("Warning: instruction is UNPREDICTABLE in a VPT " \
886 "block")
887 #define MVE_BAD_PC _("Warning: instruction is UNPREDICTABLE with PC" \
888 " operand")
889 #define MVE_BAD_SP _("Warning: instruction is UNPREDICTABLE with SP" \
890 " operand")
891 #define BAD_SIMD_TYPE _("bad type in SIMD instruction")
892 #define BAD_MVE_AUTO \
893 _("GAS auto-detection mode and -march=all is deprecated for MVE, please" \
894 " use a valid -march or -mcpu option.")
895 #define BAD_MVE_SRCDEST _("Warning: 32-bit element size and same destination "\
896 "and source operands makes instruction UNPREDICTABLE")
897 #define BAD_EL_TYPE _("bad element type for instruction")
898
899 static struct hash_control * arm_ops_hsh;
900 static struct hash_control * arm_cond_hsh;
901 static struct hash_control * arm_vcond_hsh;
902 static struct hash_control * arm_shift_hsh;
903 static struct hash_control * arm_psr_hsh;
904 static struct hash_control * arm_v7m_psr_hsh;
905 static struct hash_control * arm_reg_hsh;
906 static struct hash_control * arm_reloc_hsh;
907 static struct hash_control * arm_barrier_opt_hsh;
908
909 /* Stuff needed to resolve the label ambiguity
910 As:
911 ...
912 label: <insn>
913 may differ from:
914 ...
915 label:
916 <insn> */
917
918 symbolS * last_label_seen;
919 static int label_is_thumb_function_name = FALSE;
920
921 /* Literal pool structure. Held on a per-section
922 and per-sub-section basis. */
923
#define MAX_LITERAL_POOL_SIZE 1024
/* One literal pool: the constants accumulated for a section/subsection
   pair, flushed to the output by a subsequent .ltorg (or automatically).  */
typedef struct literal_pool
{
  /* The constants themselves, in insertion order.  */
  expressionS literals [MAX_LITERAL_POOL_SIZE];
  /* Index of the first unused slot in LITERALS.  */
  unsigned int next_free_entry;
  /* Pool identifier, used to name the pool's symbol.  */
  unsigned int id;
  /* Symbol placed at the start of the pool when it is emitted.  */
  symbolS * symbol;
  /* Section/subsection this pool accumulates literals for.  */
  segT section;
  subsegT sub_section;
#ifdef OBJ_ELF
  /* Source locations of each literal, for debug line info.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  /* Next pool in the global list (see list_of_pools).  */
  struct literal_pool * next;
  /* Required alignment (log2) for the emitted pool.  */
  unsigned int alignment;
} literal_pool;
939
940 /* Pointer to a linked list of literal pools. */
941 literal_pool * list_of_pools = NULL;
942
/* State machine for the .asmfunc/.endasmfunc directives.  */
typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,	/* Not inside an .asmfunc region.  */
  WAITING_ASMFUNC_NAME,	/* Saw .asmfunc; next label names the function.  */
  WAITING_ENDASMFUNC	/* Inside the body; waiting for .endasmfunc.  */
} asmfunc_states;

/* Current .asmfunc parsing state.  */
static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
951
952 #ifdef OBJ_ELF
953 # define now_pred seg_info (now_seg)->tc_segment_info_data.current_pred
954 #else
955 static struct current_pred now_pred;
956 #endif
957
/* Return non-zero if condition code COND is compatible with the current
   IT/VPT block condition (now_pred.cc).  Clearing the low bit of each
   code groups a condition with its logical inverse (e.g. EQ/NE), which is
   exactly the pair an IT block can encode.  */
static inline int
now_pred_compatible (int cond)
{
  return (cond & ~1) == (now_pred.cc & ~1);
}
963
/* Return non-zero if the instruction currently being assembled carries a
   condition other than AL (always).  */
static inline int
conditional_insn (void)
{
  return inst.cond != COND_ALWAYS;
}
969
970 static int in_pred_block (void);
971
972 static int handle_pred_state (void);
973
974 static void force_automatic_it_block_close (void);
975
976 static void it_fsm_post_encode (void);
977
978 #define set_pred_insn_type(type) \
979 do \
980 { \
981 inst.pred_insn_type = type; \
982 if (handle_pred_state () == FAIL) \
983 return; \
984 } \
985 while (0)
986
987 #define set_pred_insn_type_nonvoid(type, failret) \
988 do \
989 { \
990 inst.pred_insn_type = type; \
991 if (handle_pred_state () == FAIL) \
992 return failret; \
993 } \
994 while(0)
995
996 #define set_pred_insn_type_last() \
997 do \
998 { \
999 if (inst.cond == COND_ALWAYS) \
1000 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN); \
1001 else \
1002 set_pred_insn_type (INSIDE_IT_LAST_INSN); \
1003 } \
1004 while (0)
1005
1006 /* Pure syntax. */
1007
1008 /* This array holds the chars that always start a comment. If the
1009 pre-processor is disabled, these aren't very useful. */
1010 char arm_comment_chars[] = "@";
1011
1012 /* This array holds the chars that only start a comment at the beginning of
1013 a line. If the line seems to have the form '# 123 filename'
1014 .line and .file directives will appear in the pre-processed output. */
1015 /* Note that input_file.c hand checks for '#' at the beginning of the
1016 first line of the input file. This is because the compiler outputs
1017 #NO_APP at the beginning of its output. */
1018 /* Also note that comments like this one will always work. */
1019 const char line_comment_chars[] = "#";
1020
1021 char arm_line_separator_chars[] = ";";
1022
1023 /* Chars that can be used to separate mant
1024 from exp in floating point numbers. */
1025 const char EXP_CHARS[] = "eE";
1026
1027 /* Chars that mean this number is a floating point constant. */
1028 /* As in 0f12.456 */
1029 /* or 0d1.2345e12 */
1030
1031 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
1032
1033 /* Prefix characters that indicate the start of an immediate
1034 value. */
1035 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
1036
1037 /* Separator character handling. */
1038
1039 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
1040
1041 static inline int
1042 skip_past_char (char ** str, char c)
1043 {
1044 /* PR gas/14987: Allow for whitespace before the expected character. */
1045 skip_whitespace (*str);
1046
1047 if (**str == c)
1048 {
1049 (*str)++;
1050 return SUCCESS;
1051 }
1052 else
1053 return FAIL;
1054 }
1055
1056 #define skip_past_comma(str) skip_past_char (str, ',')
1057
1058 /* Arithmetic expressions (possibly involving symbols). */
1059
1060 /* Return TRUE if anything in the expression is a bignum. */
1061
1062 static bfd_boolean
1063 walk_no_bignums (symbolS * sp)
1064 {
1065 if (symbol_get_value_expression (sp)->X_op == O_big)
1066 return TRUE;
1067
1068 if (symbol_get_value_expression (sp)->X_add_symbol)
1069 {
1070 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
1071 || (symbol_get_value_expression (sp)->X_op_symbol
1072 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
1073 }
1074
1075 return FALSE;
1076 }
1077
1078 static bfd_boolean in_my_get_expression = FALSE;
1079
1080 /* Third argument to my_get_expression. */
1081 #define GE_NO_PREFIX 0
1082 #define GE_IMM_PREFIX 1
1083 #define GE_OPT_PREFIX 2
1084 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
1085 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
1086 #define GE_OPT_PREFIX_BIG 3
1087
/* Parse an expression from *STR into EP, honouring the immediate-prefix
   policy PREFIX_MODE (GE_* above).  Temporarily redirects
   input_line_pointer so the generic expression() code can be used; the
   in_my_get_expression flag lets md_operand() flag bad operands back to
   us.  On success *STR is advanced past the expression and SUCCESS (0)
   is returned.  NOTE(review): failure paths return 1, not FAIL (-1) —
   callers only test for non-zero, but the mixed convention is a quirk.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Point the generic parser at our string, remembering where it was.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = TRUE;
  expression (ep);
  in_my_get_expression = FALSE;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return SUCCESS;
}
1157
1158 /* Turn a string in input_line_pointer into a floating point constant
1159 of type TYPE, and store the appropriate bytes in *LITP. The number
1160 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1161 returned, or NULL on OK.
1162
1163 Note that fp constants aren't represent in the normal way on the ARM.
1164 In big endian mode, things are as expected. However, in little endian
1165 mode fp constants are big-endian word-wise, and little-endian byte-wise
1166 within the words. For example, (double) 1.1 in big endian mode is
1167 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1168 the byte sequence 99 99 f1 3f 9a 99 99 99.
1169
1170 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1171
1172 const char *
1173 md_atof (int type, char * litP, int * sizeP)
1174 {
1175 int prec;
1176 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1177 char *t;
1178 int i;
1179
1180 switch (type)
1181 {
1182 case 'f':
1183 case 'F':
1184 case 's':
1185 case 'S':
1186 prec = 2;
1187 break;
1188
1189 case 'd':
1190 case 'D':
1191 case 'r':
1192 case 'R':
1193 prec = 4;
1194 break;
1195
1196 case 'x':
1197 case 'X':
1198 prec = 5;
1199 break;
1200
1201 case 'p':
1202 case 'P':
1203 prec = 5;
1204 break;
1205
1206 default:
1207 *sizeP = 0;
1208 return _("Unrecognized or unsupported floating point constant");
1209 }
1210
1211 t = atof_ieee (input_line_pointer, type, words);
1212 if (t)
1213 input_line_pointer = t;
1214 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1215
1216 if (target_big_endian)
1217 {
1218 for (i = 0; i < prec; i++)
1219 {
1220 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1221 litP += sizeof (LITTLENUM_TYPE);
1222 }
1223 }
1224 else
1225 {
1226 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1227 for (i = prec - 1; i >= 0; i--)
1228 {
1229 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1230 litP += sizeof (LITTLENUM_TYPE);
1231 }
1232 else
1233 /* For a 4 byte float the order of elements in `words' is 1 0.
1234 For an 8 byte float the order is 1 0 3 2. */
1235 for (i = 0; i < prec; i += 2)
1236 {
1237 md_number_to_chars (litP, (valueT) words[i + 1],
1238 sizeof (LITTLENUM_TYPE));
1239 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1240 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1241 litP += 2 * sizeof (LITTLENUM_TYPE);
1242 }
1243 }
1244
1245 return NULL;
1246 }
1247
1248 /* We handle all bad expressions here, so that we can report the faulty
1249 instruction in the error message. */
1250
/* GAS hook called for operands expression() cannot parse.  While inside
   my_get_expression we mark the expression illegal so the caller can
   report the error against the instruction being assembled.  */
void
md_operand (expressionS * exp)
{
  if (in_my_get_expression)
    exp->X_op = O_illegal;
}
1257
1258 /* Immediate values. */
1259
1260 #ifdef OBJ_ELF
1261 /* Generic immediate-value read function for use in directives.
1262 Accepts anything that 'expression' can fold to a constant.
1263 *val receives the number. */
1264
/* Parse an immediate from input_line_pointer for use in a directive.
   Accepts anything 'expression' folds to a constant; an optional '#'/'$'
   prefix is skipped.  Stores the number in *VAL and returns SUCCESS, or
   reports an error, discards the rest of the line, and returns FAIL.  */
static int
immediate_for_directive (int *val)
{
  expressionS exp;
  exp.X_op = O_illegal;

  if (is_immediate_prefix (*input_line_pointer))
    {
      input_line_pointer++;
      expression (&exp);
    }

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected #constant"));
      ignore_rest_of_line ();
      return FAIL;
    }
  *val = exp.X_add_number;
  return SUCCESS;
}
1286 #endif
1287
1288 /* Register parsing. */
1289
1290 /* Generic register parser. CCP points to what should be the
1291 beginning of a register name. If it is indeed a valid register
1292 name, advance CCP over it and return the reg_entry structure;
1293 otherwise return NULL. Does not issue diagnostics. */
1294
/* Generic register parser.  CCP points to what should be the beginning
   of a register name.  If it is indeed a valid register name, advance
   CCP over it and return the reg_entry structure; otherwise return NULL.
   Does not issue diagnostics.  */
static struct reg_entry *
arm_reg_parse_multi (char **ccp)
{
  char *start = *ccp;
  char *p;
  struct reg_entry *reg;

  skip_whitespace (start);

#ifdef REGISTER_PREFIX
  /* A mandatory register prefix is configured: require it.  */
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif
#ifdef OPTIONAL_REGISTER_PREFIX
  /* An optional prefix: skip it if present.  */
  if (*start == OPTIONAL_REGISTER_PREFIX)
    start++;
#endif

  /* Register names must start with a letter that is a valid name
     beginner, then continue with letters, digits or underscores.  */
  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  /* Look the candidate name up in the register hash table.  */
  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  *ccp = p;
  return reg;
}
1330
/* Try the alternative syntaxes accepted for a few register classes when
   the name at START did not parse as a register of type TYPE (REG is the
   entry that did parse, or NULL).  Returns the register number, or FAIL.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1369
1370 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1371 return value is the register number or FAIL. */
1372
1373 static int
1374 arm_reg_parse (char **ccp, enum arm_reg_type type)
1375 {
1376 char *start = *ccp;
1377 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1378 int ret;
1379
1380 /* Do not allow a scalar (reg+index) to parse as a register. */
1381 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1382 return FAIL;
1383
1384 if (reg && reg->type == type)
1385 return reg->number;
1386
1387 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1388 return ret;
1389
1390 *ccp = start;
1391 return FAIL;
1392 }
1393
1394 /* Parse a Neon type specifier. *STR should point at the leading '.'
1395 character. Does no verification at this stage that the type fits the opcode
1396 properly. E.g.,
1397
1398 .i32.i32.s16
1399 .s32.f32
1400 .u16
1401
1402 Can all be legally parsed by this function.
1403
1404 Fills in neon_type struct pointer with parsed information, and updates STR
1405 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1406 type, FAIL if not. */
1407
1408 static int
1409 parse_neon_type (struct neon_type *type, char **str)
1410 {
1411 char *ptr = *str;
1412
1413 if (type)
1414 type->elems = 0;
1415
1416 while (type->elems < NEON_MAX_TYPE_ELS)
1417 {
1418 enum neon_el_type thistype = NT_untyped;
1419 unsigned thissize = -1u;
1420
1421 if (*ptr != '.')
1422 break;
1423
1424 ptr++;
1425
1426 /* Just a size without an explicit type. */
1427 if (ISDIGIT (*ptr))
1428 goto parsesize;
1429
1430 switch (TOLOWER (*ptr))
1431 {
1432 case 'i': thistype = NT_integer; break;
1433 case 'f': thistype = NT_float; break;
1434 case 'p': thistype = NT_poly; break;
1435 case 's': thistype = NT_signed; break;
1436 case 'u': thistype = NT_unsigned; break;
1437 case 'd':
1438 thistype = NT_float;
1439 thissize = 64;
1440 ptr++;
1441 goto done;
1442 default:
1443 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1444 return FAIL;
1445 }
1446
1447 ptr++;
1448
1449 /* .f is an abbreviation for .f32. */
1450 if (thistype == NT_float && !ISDIGIT (*ptr))
1451 thissize = 32;
1452 else
1453 {
1454 parsesize:
1455 thissize = strtoul (ptr, &ptr, 10);
1456
1457 if (thissize != 8 && thissize != 16 && thissize != 32
1458 && thissize != 64)
1459 {
1460 as_bad (_("bad size %d in type specifier"), thissize);
1461 return FAIL;
1462 }
1463 }
1464
1465 done:
1466 if (type)
1467 {
1468 type->el[type->elems].type = thistype;
1469 type->el[type->elems].size = thissize;
1470 type->elems++;
1471 }
1472 }
1473
1474 /* Empty/missing type is not a successful parse. */
1475 if (type->elems == 0)
1476 return FAIL;
1477
1478 *str = ptr;
1479
1480 return SUCCESS;
1481 }
1482
1483 /* Errors may be set multiple times during parsing or bit encoding
1484 (particularly in the Neon bits), but usually the earliest error which is set
1485 will be the most meaningful. Avoid overwriting it with later (cascading)
1486 errors by calling this function. */
1487
/* Record ERR as the instruction error only if no earlier (and usually
   more meaningful) error has already been set.  */
static void
first_error (const char *err)
{
  if (!inst.error)
    inst.error = err;
}
1494
1495 /* Parse a single type, e.g. ".s32", leading period included. */
1496 static int
1497 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1498 {
1499 char *str = *ccp;
1500 struct neon_type optype;
1501
1502 if (*str == '.')
1503 {
1504 if (parse_neon_type (&optype, &str) == SUCCESS)
1505 {
1506 if (optype.elems == 1)
1507 *vectype = optype.el[0];
1508 else
1509 {
1510 first_error (_("only one type should be specified for operand"));
1511 return FAIL;
1512 }
1513 }
1514 else
1515 {
1516 first_error (_("vector type expected"));
1517 return FAIL;
1518 }
1519 }
1520 else
1521 return FAIL;
1522
1523 *ccp = str;
1524
1525 return SUCCESS;
1526 }
1527
1528 /* Special meanings for indices (which have a range of 0-7), which will fit into
1529 a 4-bit integer. */
1530
1531 #define NEON_ALL_LANES 15
1532 #define NEON_INTERLEAVE_LANES 14
1533
1534 /* Record a use of the given feature. */
/* Record a use of the given feature, merging it into the architecture
   feature set for the current instruction set (Thumb or ARM).  */
static void
record_feature_use (const arm_feature_set *feature)
{
  if (thumb_mode)
    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
  else
    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
}
1543
1544 /* If the given feature available in the selected CPU, mark it as used.
1545 Returns TRUE iff feature is available. */
/* If the given feature is available on the selected CPU, record its use
   and return TRUE; otherwise return FALSE.  MVE features are refused
   outright (with BAD_MVE_AUTO) in auto-detection / -march=all mode.  */
static bfd_boolean
mark_feature_used (const arm_feature_set *feature)
{

  /* Do not support the use of MVE only instructions when in auto-detection or
     -march=all.  */
  if (((feature == &mve_ext) || (feature == &mve_fp_ext))
      && ARM_CPU_IS_ANY (cpu_variant))
    {
      first_error (BAD_MVE_AUTO);
      return FALSE;
    }
  /* Ensure the option is valid on the current architecture.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
    return FALSE;

  /* Add the appropriate architecture feature for the barrier option used.
     */
  record_feature_use (feature);

  return TRUE;
}
1568
1569 /* Parse either a register or a scalar, with an optional type. Return the
1570 register number, and optionally fill in the actual type of the register
1571 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1572 type/index information in *TYPEINFO. */
1573
/* Parse either a register or a scalar (register + [index]) of class TYPE
   at *CCP.  Returns the register number or FAIL.  On success *CCP is
   advanced, the (possibly narrowed) register class is stored through
   RTYPE (if non-NULL) and the parsed type/index through TYPEINFO (if
   non-NULL).  Order of checks matters: alternate syntax, polymorphic
   class narrowing, MVE restrictions, type suffix, then scalar index.  */
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with an "untyped, unindexed" description.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register.  Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_NSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type == REG_TYPE_MQ)
    {
      /* MQ (MVE Q register) requires the MVE extension, names a Q
	 register, and is limited to q0..q7.  */
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	return FAIL;

      if (!reg || reg->type != REG_TYPE_NQ)
	return FAIL;

      if (reg->number > 14 && !mark_feature_used (&fpu_vfp_ext_d32))
	{
	  first_error (_("expected MVE register [q0..q7]"));
	  return FAIL;
	}
      type = REG_TYPE_NQ;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (type == REG_TYPE_NQ))
    return FAIL;


  if (type != reg->type)
    return FAIL;

  /* Start from any type/index information attached to the register alias.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix overrides nothing: redefinition is an
     error if the alias already carried a type.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional "[index]" or "[]" (all lanes) suffix.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD
	  && !(type == REG_TYPE_VFS
	       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2)))
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1701
1702 /* Like arm_reg_parse, but also allow the following extra features:
1703 - If RTYPE is non-zero, return the (possibly restricted) type of the
1704 register (e.g. Neon double or quad reg when either has been requested).
1705 - If this is a Neon vector type with additional type information, fill
1706 in the struct pointed to by VECTYPE (if non-NULL).
1707 This function will fault on encountering a scalar. */
1708
1709 static int
1710 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1711 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1712 {
1713 struct neon_typed_alias atype;
1714 char *str = *ccp;
1715 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1716
1717 if (reg == FAIL)
1718 return FAIL;
1719
1720 /* Do not allow regname(... to parse as a register. */
1721 if (*str == '(')
1722 return FAIL;
1723
1724 /* Do not allow a scalar (reg+index) to parse as a register. */
1725 if ((atype.defined & NTA_HASINDEX) != 0)
1726 {
1727 first_error (_("register operand expected, but got scalar"));
1728 return FAIL;
1729 }
1730
1731 if (vectype)
1732 *vectype = atype.eltype;
1733
1734 *ccp = str;
1735
1736 return reg;
1737 }
1738
1739 #define NEON_SCALAR_REG(X) ((X) >> 4)
1740 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1741
1742 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1743 have enough information to be able to do a good job bounds-checking. So, we
1744 just do easy checks here, and do further checks later. */
1745
/* Parse a Neon scalar, i.e. a D (or, for ELSIZE == 4, S) register with a
   mandatory lane index, e.g. "d3[1]".  ELSIZE is the element size in
   bytes; the index must satisfy index < 64 / ELSIZE.  Stores the element
   type through TYPE (if non-NULL) and returns the encoded value
   reg * 16 + index (see NEON_SCALAR_REG/NEON_SCALAR_INDEX), or FAIL.
   Most of the time when we're parsing a scalar, we don't have enough
   information to do a good job bounds-checking, so only easy checks are
   done here; further checks happen later.  */
static int
parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
{
  int reg;
  char *str = *ccp;
  struct neon_typed_alias atype;
  enum arm_reg_type reg_type = REG_TYPE_VFD;

  /* 4-byte elements live in single-precision (S) registers.  */
  if (elsize == 4)
    reg_type = REG_TYPE_VFS;

  reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);

  /* A scalar must actually carry an index.  */
  if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
    return FAIL;

  if (atype.index == NEON_ALL_LANES)
    {
      first_error (_("scalar must have an index"));
      return FAIL;
    }
  else if (atype.index >= 64 / elsize)
    {
      first_error (_("scalar index out of range"));
      return FAIL;
    }

  if (type)
    *type = atype.eltype;

  *ccp = str;

  return reg * 16 + atype.index;
}
1780
1781 /* Types of registers in a list. */
1782
/* Types of registers in a list.  */
enum reg_list_els
{
  REGLIST_RN,		/* Core (R) registers.  */
  REGLIST_CLRM,		/* CLRM list: r0-r12, lr, APSR.  */
  REGLIST_VFP_S,	/* VFP single-precision (S) registers.  */
  REGLIST_VFP_S_VPR,	/* S registers, VPR required last.  */
  REGLIST_VFP_D,	/* VFP/Neon double-precision (D) registers.  */
  REGLIST_VFP_D_VPR,	/* D registers, VPR required last.  */
  REGLIST_NEON_D	/* Neon D registers, Q pairs allowed.  */
};
1793
1794 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1795
1796 static long
1797 parse_reg_list (char ** strp, enum reg_list_els etype)
1798 {
1799 char *str = *strp;
1800 long range = 0;
1801 int another_range;
1802
1803 gas_assert (etype == REGLIST_RN || etype == REGLIST_CLRM);
1804
1805 /* We come back here if we get ranges concatenated by '+' or '|'. */
1806 do
1807 {
1808 skip_whitespace (str);
1809
1810 another_range = 0;
1811
1812 if (*str == '{')
1813 {
1814 int in_range = 0;
1815 int cur_reg = -1;
1816
1817 str++;
1818 do
1819 {
1820 int reg;
1821 const char apsr_str[] = "apsr";
1822 int apsr_str_len = strlen (apsr_str);
1823
1824 reg = arm_reg_parse (&str, REGLIST_RN);
1825 if (etype == REGLIST_CLRM)
1826 {
1827 if (reg == REG_SP || reg == REG_PC)
1828 reg = FAIL;
1829 else if (reg == FAIL
1830 && !strncasecmp (str, apsr_str, apsr_str_len)
1831 && !ISALPHA (*(str + apsr_str_len)))
1832 {
1833 reg = 15;
1834 str += apsr_str_len;
1835 }
1836
1837 if (reg == FAIL)
1838 {
1839 first_error (_("r0-r12, lr or APSR expected"));
1840 return FAIL;
1841 }
1842 }
1843 else /* etype == REGLIST_RN. */
1844 {
1845 if (reg == FAIL)
1846 {
1847 first_error (_(reg_expected_msgs[REGLIST_RN]));
1848 return FAIL;
1849 }
1850 }
1851
1852 if (in_range)
1853 {
1854 int i;
1855
1856 if (reg <= cur_reg)
1857 {
1858 first_error (_("bad range in register list"));
1859 return FAIL;
1860 }
1861
1862 for (i = cur_reg + 1; i < reg; i++)
1863 {
1864 if (range & (1 << i))
1865 as_tsktsk
1866 (_("Warning: duplicated register (r%d) in register list"),
1867 i);
1868 else
1869 range |= 1 << i;
1870 }
1871 in_range = 0;
1872 }
1873
1874 if (range & (1 << reg))
1875 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1876 reg);
1877 else if (reg <= cur_reg)
1878 as_tsktsk (_("Warning: register range not in ascending order"));
1879
1880 range |= 1 << reg;
1881 cur_reg = reg;
1882 }
1883 while (skip_past_comma (&str) != FAIL
1884 || (in_range = 1, *str++ == '-'));
1885 str--;
1886
1887 if (skip_past_char (&str, '}') == FAIL)
1888 {
1889 first_error (_("missing `}'"));
1890 return FAIL;
1891 }
1892 }
1893 else if (etype == REGLIST_RN)
1894 {
1895 expressionS exp;
1896
1897 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1898 return FAIL;
1899
1900 if (exp.X_op == O_constant)
1901 {
1902 if (exp.X_add_number
1903 != (exp.X_add_number & 0x0000ffff))
1904 {
1905 inst.error = _("invalid register mask");
1906 return FAIL;
1907 }
1908
1909 if ((range & exp.X_add_number) != 0)
1910 {
1911 int regno = range & exp.X_add_number;
1912
1913 regno &= -regno;
1914 regno = (1 << regno) - 1;
1915 as_tsktsk
1916 (_("Warning: duplicated register (r%d) in register list"),
1917 regno);
1918 }
1919
1920 range |= exp.X_add_number;
1921 }
1922 else
1923 {
1924 if (inst.relocs[0].type != 0)
1925 {
1926 inst.error = _("expression too complex");
1927 return FAIL;
1928 }
1929
1930 memcpy (&inst.relocs[0].exp, &exp, sizeof (expressionS));
1931 inst.relocs[0].type = BFD_RELOC_ARM_MULTI;
1932 inst.relocs[0].pc_rel = 0;
1933 }
1934 }
1935
1936 if (*str == '|' || *str == '+')
1937 {
1938 str++;
1939 another_range = 1;
1940 }
1941 }
1942 while (another_range);
1943
1944 *strp = str;
1945 return range;
1946 }
1947
1948 /* Parse a VFP register list. If the string is invalid return FAIL.
1949 Otherwise return the number of registers, and set PBASE to the first
1950 register. Parses registers of type ETYPE.
1951 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1952 - Q registers can be used to specify pairs of D registers
1953 - { } can be omitted from around a singleton register list
1954 FIXME: This is not implemented, as it would require backtracking in
1955 some cases, e.g.:
1956 vtbl.8 d3,d4,d5
1957 This could be done (the meaning isn't really ambiguous), but doesn't
1958 fit in well with the current parsing framework.
1959 - 32 D registers may be used (also true for VFPv3).
1960 FIXME: Types are ignored in these register lists, which is probably a
1961 bug. */
1962
1963 static int
1964 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype,
1965 bfd_boolean *partial_match)
1966 {
1967 char *str = *ccp;
1968 int base_reg;
1969 int new_base;
1970 enum arm_reg_type regtype = (enum arm_reg_type) 0;
1971 int max_regs = 0;
1972 int count = 0;
1973 int warned = 0;
1974 unsigned long mask = 0;
1975 int i;
1976 bfd_boolean vpr_seen = FALSE;
1977 bfd_boolean expect_vpr =
1978 (etype == REGLIST_VFP_S_VPR) || (etype == REGLIST_VFP_D_VPR);
1979
1980 if (skip_past_char (&str, '{') == FAIL)
1981 {
1982 inst.error = _("expecting {");
1983 return FAIL;
1984 }
1985
1986 switch (etype)
1987 {
1988 case REGLIST_VFP_S:
1989 case REGLIST_VFP_S_VPR:
1990 regtype = REG_TYPE_VFS;
1991 max_regs = 32;
1992 break;
1993
1994 case REGLIST_VFP_D:
1995 case REGLIST_VFP_D_VPR:
1996 regtype = REG_TYPE_VFD;
1997 break;
1998
1999 case REGLIST_NEON_D:
2000 regtype = REG_TYPE_NDQ;
2001 break;
2002
2003 default:
2004 gas_assert (0);
2005 }
2006
2007 if (etype != REGLIST_VFP_S && etype != REGLIST_VFP_S_VPR)
2008 {
2009 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
2010 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
2011 {
2012 max_regs = 32;
2013 if (thumb_mode)
2014 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
2015 fpu_vfp_ext_d32);
2016 else
2017 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
2018 fpu_vfp_ext_d32);
2019 }
2020 else
2021 max_regs = 16;
2022 }
2023
2024 base_reg = max_regs;
2025 *partial_match = FALSE;
2026
2027 do
2028 {
2029 int setmask = 1, addregs = 1;
2030 const char vpr_str[] = "vpr";
2031 int vpr_str_len = strlen (vpr_str);
2032
2033 new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
2034
2035 if (expect_vpr)
2036 {
2037 if (new_base == FAIL
2038 && !strncasecmp (str, vpr_str, vpr_str_len)
2039 && !ISALPHA (*(str + vpr_str_len))
2040 && !vpr_seen)
2041 {
2042 vpr_seen = TRUE;
2043 str += vpr_str_len;
2044 if (count == 0)
2045 base_reg = 0; /* Canonicalize VPR only on d0 with 0 regs. */
2046 }
2047 else if (vpr_seen)
2048 {
2049 first_error (_("VPR expected last"));
2050 return FAIL;
2051 }
2052 else if (new_base == FAIL)
2053 {
2054 if (regtype == REG_TYPE_VFS)
2055 first_error (_("VFP single precision register or VPR "
2056 "expected"));
2057 else /* regtype == REG_TYPE_VFD. */
2058 first_error (_("VFP/Neon double precision register or VPR "
2059 "expected"));
2060 return FAIL;
2061 }
2062 }
2063 else if (new_base == FAIL)
2064 {
2065 first_error (_(reg_expected_msgs[regtype]));
2066 return FAIL;
2067 }
2068
2069 *partial_match = TRUE;
2070 if (vpr_seen)
2071 continue;
2072
2073 if (new_base >= max_regs)
2074 {
2075 first_error (_("register out of range in list"));
2076 return FAIL;
2077 }
2078
2079 /* Note: a value of 2 * n is returned for the register Q<n>. */
2080 if (regtype == REG_TYPE_NQ)
2081 {
2082 setmask = 3;
2083 addregs = 2;
2084 }
2085
2086 if (new_base < base_reg)
2087 base_reg = new_base;
2088
2089 if (mask & (setmask << new_base))
2090 {
2091 first_error (_("invalid register list"));
2092 return FAIL;
2093 }
2094
2095 if ((mask >> new_base) != 0 && ! warned && !vpr_seen)
2096 {
2097 as_tsktsk (_("register list not in ascending order"));
2098 warned = 1;
2099 }
2100
2101 mask |= setmask << new_base;
2102 count += addregs;
2103
2104 if (*str == '-') /* We have the start of a range expression */
2105 {
2106 int high_range;
2107
2108 str++;
2109
2110 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
2111 == FAIL)
2112 {
2113 inst.error = gettext (reg_expected_msgs[regtype]);
2114 return FAIL;
2115 }
2116
2117 if (high_range >= max_regs)
2118 {
2119 first_error (_("register out of range in list"));
2120 return FAIL;
2121 }
2122
2123 if (regtype == REG_TYPE_NQ)
2124 high_range = high_range + 1;
2125
2126 if (high_range <= new_base)
2127 {
2128 inst.error = _("register range not in ascending order");
2129 return FAIL;
2130 }
2131
2132 for (new_base += addregs; new_base <= high_range; new_base += addregs)
2133 {
2134 if (mask & (setmask << new_base))
2135 {
2136 inst.error = _("invalid register list");
2137 return FAIL;
2138 }
2139
2140 mask |= setmask << new_base;
2141 count += addregs;
2142 }
2143 }
2144 }
2145 while (skip_past_comma (&str) != FAIL);
2146
2147 str++;
2148
2149 /* Sanity check -- should have raised a parse error above. */
2150 if ((!vpr_seen && count == 0) || count > max_regs)
2151 abort ();
2152
2153 *pbase = base_reg;
2154
2155 if (expect_vpr && !vpr_seen)
2156 {
2157 first_error (_("VPR expected last"));
2158 return FAIL;
2159 }
2160
2161 /* Final test -- the registers must be consecutive. */
2162 mask >>= base_reg;
2163 for (i = 0; i < count; i++)
2164 {
2165 if ((mask & (1u << i)) == 0)
2166 {
2167 inst.error = _("non-contiguous register range");
2168 return FAIL;
2169 }
2170 }
2171
2172 *ccp = str;
2173
2174 return count;
2175 }
2176
2177 /* True if two alias types are the same. */
2178
2179 static bfd_boolean
2180 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
2181 {
2182 if (!a && !b)
2183 return TRUE;
2184
2185 if (!a || !b)
2186 return FALSE;
2187
2188 if (a->defined != b->defined)
2189 return FALSE;
2190
2191 if ((a->defined & NTA_HASTYPE) != 0
2192 && (a->eltype.type != b->eltype.type
2193 || a->eltype.size != b->eltype.size))
2194 return FALSE;
2195
2196 if ((a->defined & NTA_HASINDEX) != 0
2197 && (a->index != b->index))
2198 return FALSE;
2199
2200 return TRUE;
2201 }
2202
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

/* Accessors for the packed return value of parse_neon_el_struct_list,
   per the layout documented above.  */
#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
2214
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   int mve,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;	  /* First register of the list; -1 = none yet.  */
  int reg_incr = -1;	  /* Register stride; -1 = not yet established.  */
  int count = 0;	  /* Registers seen so far (Q regs count as 2).  */
  int lane = -1;	  /* Common lane index, or NEON_*_LANES constant.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  /* MVE only allows unit stride.  */
  const char *const incr_error = mve ? _("register stride must be 1") :
    _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      if (mve)
	rtype = REG_TYPE_MQ;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: remember it and its type; Q registers fix
	     the stride to 1 immediately.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register establishes the stride (1 or 2).  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Subsequent registers must continue the established stride.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      /* All list elements must carry identical type/index info.  */
      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax.  Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Add the number of registers the range spans; each Q register
	     counts as two D registers.  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* An explicit [n] index: all elements must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  MVE (mve != 0) permits lists longer than 4 registers;
     plain Neon does not.  */
  if (lane == -1 || base_reg == -1 || count < 1 || (!mve && count > 4)
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as documented above the NEON_LANE
     macros.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2375
2376 /* Parse an explicit relocation suffix on an expression. This is
2377 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2378 arm_reloc_hsh contains no entries, so this function can only
2379 succeed if there is no () after the word. Returns -1 on error,
2380 BFD_RELOC_UNUSED if there wasn't any suffix. */
2381
2382 static int
2383 parse_reloc (char **str)
2384 {
2385 struct reloc_entry *r;
2386 char *p, *q;
2387
2388 if (**str != '(')
2389 return BFD_RELOC_UNUSED;
2390
2391 p = *str + 1;
2392 q = p;
2393
2394 while (*q && *q != ')' && *q != ',')
2395 q++;
2396 if (*q != ')')
2397 return -1;
2398
2399 if ((r = (struct reloc_entry *)
2400 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2401 return -1;
2402
2403 *str = q + 1;
2404 return r->reloc;
2405 }
2406
2407 /* Directives: register aliases. */
2408
2409 static struct reg_entry *
2410 insert_reg_alias (char *str, unsigned number, int type)
2411 {
2412 struct reg_entry *new_reg;
2413 const char *name;
2414
2415 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2416 {
2417 if (new_reg->builtin)
2418 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2419
2420 /* Only warn about a redefinition if it's not defined as the
2421 same register. */
2422 else if (new_reg->number != number || new_reg->type != type)
2423 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2424
2425 return NULL;
2426 }
2427
2428 name = xstrdup (str);
2429 new_reg = XNEW (struct reg_entry);
2430
2431 new_reg->name = name;
2432 new_reg->number = number;
2433 new_reg->type = type;
2434 new_reg->builtin = FALSE;
2435 new_reg->neon = NULL;
2436
2437 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2438 abort ();
2439
2440 return new_reg;
2441 }
2442
2443 static void
2444 insert_neon_reg_alias (char *str, int number, int type,
2445 struct neon_typed_alias *atype)
2446 {
2447 struct reg_entry *reg = insert_reg_alias (str, number, type);
2448
2449 if (!reg)
2450 {
2451 first_error (_("attempt to redefine typed alias"));
2452 return;
2453 }
2454
2455 if (atype)
2456 {
2457 reg->neon = XNEW (struct neon_typed_alias);
2458 *reg->neon = *atype;
2459 }
2460 }
2461
2462 /* Look for the .req directive. This is of the form:
2463
2464 new_register_name .req existing_register_name
2465
2466 If we find one, or if it looks sufficiently like one that we want to
2467 handle any error here, return TRUE. Otherwise return FALSE. */
2468
2469 static bfd_boolean
2470 create_register_alias (char * newname, char *p)
2471 {
2472 struct reg_entry *old;
2473 char *oldname, *nbuf;
2474 size_t nlen;
2475
2476 /* The input scrubber ensures that whitespace after the mnemonic is
2477 collapsed to single spaces. */
2478 oldname = p;
2479 if (strncmp (oldname, " .req ", 6) != 0)
2480 return FALSE;
2481
2482 oldname += 6;
2483 if (*oldname == '\0')
2484 return FALSE;
2485
2486 old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2487 if (!old)
2488 {
2489 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2490 return TRUE;
2491 }
2492
2493 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2494 the desired alias name, and p points to its end. If not, then
2495 the desired alias name is in the global original_case_string. */
2496 #ifdef TC_CASE_SENSITIVE
2497 nlen = p - newname;
2498 #else
2499 newname = original_case_string;
2500 nlen = strlen (newname);
2501 #endif
2502
2503 nbuf = xmemdup0 (newname, nlen);
2504
2505 /* Create aliases under the new name as stated; an all-lowercase
2506 version of the new name; and an all-uppercase version of the new
2507 name. */
2508 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2509 {
2510 for (p = nbuf; *p; p++)
2511 *p = TOUPPER (*p);
2512
2513 if (strncmp (nbuf, newname, nlen))
2514 {
2515 /* If this attempt to create an additional alias fails, do not bother
2516 trying to create the all-lower case alias. We will fail and issue
2517 a second, duplicate error message. This situation arises when the
2518 programmer does something like:
2519 foo .req r0
2520 Foo .req r1
2521 The second .req creates the "Foo" alias but then fails to create
2522 the artificial FOO alias because it has already been created by the
2523 first .req. */
2524 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2525 {
2526 free (nbuf);
2527 return TRUE;
2528 }
2529 }
2530
2531 for (p = nbuf; *p; p++)
2532 *p = TOLOWER (*p);
2533
2534 if (strncmp (nbuf, newname, nlen))
2535 insert_reg_alias (nbuf, old->number, old->type);
2536 }
2537
2538 free (nbuf);
2539 return TRUE;
2540 }
2541
2542 /* Create a Neon typed/indexed register alias using directives, e.g.:
2543 X .dn d5.s32[1]
2544 Y .qn 6.s16
2545 Z .dn d7
2546 T .dn Z[0]
2547 These typed registers can be used instead of the types specified after the
2548 Neon mnemonic, so long as all operands given have types. Types can also be
2549 specified directly, e.g.:
2550 vadd d0.s32, d1.s32, d2.s32 */
2551
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;	/* Scratch entry for numeric base regs.  */
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* .dn defines a D-register alias, .qn a Q-register alias; anything
     else is not our directive.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      basereg = &mybasereg;
      /* Q register numbers are stored doubled (as D-register pairs).  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit any type/index info already attached to the base alias.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  /* Register the alias as written, then (when they differ) its
     all-uppercase and all-lowercase variants, mirroring what
     create_register_alias does for plain .req aliases.  */
  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2690
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  Reaching this
   handler therefore means the directive was misplaced.  */

static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
2699
/* Likewise for .dn: it must appear between the alias and the register
   name (see create_neon_reg_alias), never at the start of a line.  */

static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2705
/* Likewise for .qn: it must appear between the alias and the register
   name (see create_neon_reg_alias), never at the start of a line.  */

static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2711
2712 /* The .unreq directive deletes an alias which was previously defined
2713 by .req. For example:
2714
2715 my_alias .req r11
2716 .unreq my_alias */
2717
2718 static void
2719 s_unreq (int a ATTRIBUTE_UNUSED)
2720 {
2721 char * name;
2722 char saved_char;
2723
2724 name = input_line_pointer;
2725
2726 while (*input_line_pointer != 0
2727 && *input_line_pointer != ' '
2728 && *input_line_pointer != '\n')
2729 ++input_line_pointer;
2730
2731 saved_char = *input_line_pointer;
2732 *input_line_pointer = 0;
2733
2734 if (!*name)
2735 as_bad (_("invalid syntax for .unreq directive"));
2736 else
2737 {
2738 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2739 name);
2740
2741 if (!reg)
2742 as_bad (_("unknown register alias '%s'"), name);
2743 else if (reg->builtin)
2744 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2745 name);
2746 else
2747 {
2748 char * p;
2749 char * nbuf;
2750
2751 hash_delete (arm_reg_hsh, name, FALSE);
2752 free ((char *) reg->name);
2753 if (reg->neon)
2754 free (reg->neon);
2755 free (reg);
2756
2757 /* Also locate the all upper case and all lower case versions.
2758 Do not complain if we cannot find one or the other as it
2759 was probably deleted above. */
2760
2761 nbuf = strdup (name);
2762 for (p = nbuf; *p; p++)
2763 *p = TOUPPER (*p);
2764 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2765 if (reg)
2766 {
2767 hash_delete (arm_reg_hsh, nbuf, FALSE);
2768 free ((char *) reg->name);
2769 if (reg->neon)
2770 free (reg->neon);
2771 free (reg);
2772 }
2773
2774 for (p = nbuf; *p; p++)
2775 *p = TOLOWER (*p);
2776 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2777 if (reg)
2778 {
2779 hash_delete (arm_reg_hsh, nbuf, FALSE);
2780 free ((char *) reg->name);
2781 if (reg->neon)
2782 free (reg->neon);
2783 free (reg);
2784 }
2785
2786 free (nbuf);
2787 }
2788 }
2789
2790 *input_line_pointer = saved_char;
2791 demand_empty_rest_of_line ();
2792 }
2793
2794 /* Directives: Instruction set selection. */
2795
2796 #ifdef OBJ_ELF
2797 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2798 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2799 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2800 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2801
2802 /* Create a new mapping symbol for the transition to STATE. */
2803
/* Emit a mapping symbol ($a, $t or $d per STATE) at offset VALUE
   within FRAG, and record it in the frag's first_map/last_map fields,
   replacing any symbol already present at the same offset.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mark the symbol's ARM/Thumb-ness so interworking support can see
     what kind of code follows; data symbols get no marking.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map)
	    <= S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2877
2878 /* We must sometimes convert a region marked as code to data during
2879 code alignment, if an odd number of bytes have to be padded. The
2880 code mapping symbol is pushed to an aligned address. */
2881
/* Mark BYTES bytes starting at offset VALUE of FRAG as data: emit a
   $d symbol at VALUE and a STATE ($a/$t) symbol at VALUE + BYTES,
   first removing any mapping symbol already sitting at VALUE.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The symbol being removed was also the frag's first map.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2904
2905 static void mapping_state_2 (enum mstate state, int max_chars);
2906
2907 /* Set the mapping state to STATE. Only call this when about to
2908 emit some STATE bytes to the file. */
2909
2910 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to me marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  The first-to-data transition
       is deferred until mapping_state_2 runs for a later state.  */
    return;

  /* Emit the mapping symbol at the current position (0 bytes back).  */
  mapping_state_2 (state, 0);
}
2943
2944 /* Same as mapping_state, but MAX_CHARS bytes have already been
2945 allocated. Put the mapping symbol that far back. */
2946
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only emit mapping symbols into normal (loadable) sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* On the first code symbol in a section, retroactively mark any
     content already emitted at the start of the section as data.  */
  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  /* MAX_CHARS bytes have already been allocated for the new state's
     content; place the symbol that far back.  */
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2973 #undef TRANSITION
2974 #else
2975 #define mapping_state(x) ((void)0)
2976 #define mapping_state_2(x, y) ((void)0)
2977 #endif
2978
2979 /* Find the real, Thumb encoded start of a Thumb function. */
2980
2981 #ifdef OBJ_COFF
/* Given SYMBOLP, the target of a BL, return the symbol for the
   corresponding ".real_start_of" stub if one exists, otherwise (local
   labels, dot-prefixed names, or no stub found) SYMBOLP itself.  */

static symbolS *
find_real_start (symbolS * symbolP)
{
  char * real_start;
  const char * name = S_GET_NAME (symbolP);
  symbolS * new_target;

  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
#define STUB_NAME ".real_start_of"

  if (name == NULL)
    abort ();

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location. These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
    return symbolP;

  real_start = concat (STUB_NAME, name, NULL);
  new_target = symbol_find (real_start);
  free (real_start);

  if (new_target == NULL)
    {
      as_warn (_("Failed to find real start of function: %s\n"), name);
      new_target = symbolP;
    }

  return new_target;
}
3015 #endif
3016
3017 static void
3018 opcode_select (int width)
3019 {
3020 switch (width)
3021 {
3022 case 16:
3023 if (! thumb_mode)
3024 {
3025 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
3026 as_bad (_("selected processor does not support THUMB opcodes"));
3027
3028 thumb_mode = 1;
3029 /* No need to force the alignment, since we will have been
3030 coming from ARM mode, which is word-aligned. */
3031 record_alignment (now_seg, 1);
3032 }
3033 break;
3034
3035 case 32:
3036 if (thumb_mode)
3037 {
3038 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
3039 as_bad (_("selected processor does not support ARM opcodes"));
3040
3041 thumb_mode = 0;
3042
3043 if (!need_pass_2)
3044 frag_align (2, 0, 0);
3045
3046 record_alignment (now_seg, 1);
3047 }
3048 break;
3049
3050 default:
3051 as_bad (_("invalid instruction size selected (%d)"), width);
3052 }
3053 }
3054
/* Implement the ".arm" directive: switch to 32-bit ARM encoding.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
3061
/* Implement the ".thumb" directive: switch to 16-bit Thumb encoding.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
3068
3069 static void
3070 s_code (int unused ATTRIBUTE_UNUSED)
3071 {
3072 int temp;
3073
3074 temp = get_absolute_expression ();
3075 switch (temp)
3076 {
3077 case 16:
3078 case 32:
3079 opcode_select (temp);
3080 break;
3081
3082 default:
3083 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
3084 }
3085 }
3086
3087 static void
3088 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
3089 {
3090 /* If we are not already in thumb mode go into it, EVEN if
3091 the target processor does not support thumb instructions.
3092 This is used by gcc/config/arm/lib1funcs.asm for example
3093 to compile interworking support functions even if the
3094 target processor should not support interworking. */
3095 if (! thumb_mode)
3096 {
3097 thumb_mode = 2;
3098 record_alignment (now_seg, 1);
3099 }
3100
3101 demand_empty_rest_of_line ();
3102 }
3103
/* Implement ".thumb_func": switch to Thumb encoding and flag the next
   label as the entry point of a Thumb function.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
3113
3114 /* Perform a .set directive, but also mark the alias as
3115 being a thumb function. */
3116
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Re-terminate NAME just for the error message, then restore the
	 delimiter character before skipping the line.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  /* For the .thumb_equ flavour (EQUIV non-zero), redefining an
     already-defined symbol is an error.  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  /* Mark the aliased symbol as a Thumb function entry point.  */
  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
3202
3203 /* Directives: Mode selection. */
3204
3205 /* .syntax [unified|divided] - choose the new unified syntax
3206 (same for Arm and Thumb encoding, modulo slight differences in what
3207 can be represented) or the old divergent syntax for each mode. */
3208 static void
3209 s_syntax (int unused ATTRIBUTE_UNUSED)
3210 {
3211 char *name, delim;
3212
3213 delim = get_symbol_name (& name);
3214
3215 if (!strcasecmp (name, "unified"))
3216 unified_syntax = TRUE;
3217 else if (!strcasecmp (name, "divided"))
3218 unified_syntax = FALSE;
3219 else
3220 {
3221 as_bad (_("unrecognized syntax mode \"%s\""), name);
3222 return;
3223 }
3224 (void) restore_line_pointer (delim);
3225 demand_empty_rest_of_line ();
3226 }
3227
3228 /* Directives: sectioning and alignment. */
3229
/* Implement the ".bss" directive: switch the current segment to .bss.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3242
/* Implement the ".even" directive: align to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  /* record_alignment's argument is a log2 alignment: 1 => 2 bytes.  */
  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3254
3255 /* Directives: CodeComposer Studio. */
3256
3257 /* .ref (for CodeComposer Studio syntax only). */
3258 static void
3259 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3260 {
3261 if (codecomposer_syntax)
3262 ignore_rest_of_line ();
3263 else
3264 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3265 }
3266
3267 /* If name is not NULL, then it is used for marking the beginning of a
3268 function, whereas if it is NULL then it means the function end. */
3269 static void
3270 asmfunc_debug (const char * name)
3271 {
3272 static const char * last_name = NULL;
3273
3274 if (name != NULL)
3275 {
3276 gas_assert (last_name == NULL);
3277 last_name = name;
3278
3279 if (debug_type == DEBUG_STABS)
3280 stabs_generate_asm_func (name, name);
3281 }
3282 else
3283 {
3284 gas_assert (last_name != NULL);
3285
3286 if (debug_type == DEBUG_STABS)
3287 stabs_generate_asm_endfunc (last_name, last_name);
3288
3289 last_name = NULL;
3290 }
3291 }
3292
3293 static void
3294 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3295 {
3296 if (codecomposer_syntax)
3297 {
3298 switch (asmfunc_state)
3299 {
3300 case OUTSIDE_ASMFUNC:
3301 asmfunc_state = WAITING_ASMFUNC_NAME;
3302 break;
3303
3304 case WAITING_ASMFUNC_NAME:
3305 as_bad (_(".asmfunc repeated."));
3306 break;
3307
3308 case WAITING_ENDASMFUNC:
3309 as_bad (_(".asmfunc without function."));
3310 break;
3311 }
3312 demand_empty_rest_of_line ();
3313 }
3314 else
3315 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3316 }
3317
3318 static void
3319 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3320 {
3321 if (codecomposer_syntax)
3322 {
3323 switch (asmfunc_state)
3324 {
3325 case OUTSIDE_ASMFUNC:
3326 as_bad (_(".endasmfunc without a .asmfunc."));
3327 break;
3328
3329 case WAITING_ASMFUNC_NAME:
3330 as_bad (_(".endasmfunc without function."));
3331 break;
3332
3333 case WAITING_ENDASMFUNC:
3334 asmfunc_state = OUTSIDE_ASMFUNC;
3335 asmfunc_debug (NULL);
3336 break;
3337 }
3338 demand_empty_rest_of_line ();
3339 }
3340 else
3341 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3342 }
3343
3344 static void
3345 s_ccs_def (int name)
3346 {
3347 if (codecomposer_syntax)
3348 s_globl (name);
3349 else
3350 as_bad (_(".def pseudo-op only available with -mccs flag."));
3351 }
3352
3353 /* Directives: Literal pools. */
3354
3355 static literal_pool *
3356 find_literal_pool (void)
3357 {
3358 literal_pool * pool;
3359
3360 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3361 {
3362 if (pool->section == now_seg
3363 && pool->sub_section == now_subseg)
3364 break;
3365 }
3366
3367 return pool;
3368 }
3369
3370 static literal_pool *
3371 find_or_make_literal_pool (void)
3372 {
3373 /* Next literal pool ID number. */
3374 static unsigned int latest_pool_num = 1;
3375 literal_pool * pool;
3376
3377 pool = find_literal_pool ();
3378
3379 if (pool == NULL)
3380 {
3381 /* Create a new pool. */
3382 pool = XNEW (literal_pool);
3383 if (! pool)
3384 return NULL;
3385
3386 pool->next_free_entry = 0;
3387 pool->section = now_seg;
3388 pool->sub_section = now_subseg;
3389 pool->next = list_of_pools;
3390 pool->symbol = NULL;
3391 pool->alignment = 2;
3392
3393 /* Add it to the list. */
3394 list_of_pools = pool;
3395 }
3396
3397 /* New pools, and emptied pools, will have a NULL symbol. */
3398 if (pool->symbol == NULL)
3399 {
3400 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3401 (valueT) 0, &zero_address_frag);
3402 pool->id = latest_pool_num ++;
3403 }
3404
3405 /* Done. */
3406 return pool;
3407 }
3408
/* Add the literal in the global 'inst' structure (inst.relocs[0].exp,
   plus inst.operands[1] for 8-byte values) to the relevant literal
   pool.  NBYTES is 4 or 8.  On success, rewrites inst.relocs[0].exp
   as pool-symbol + byte-offset and returns SUCCESS; on overflow or a
   bad operand sets inst.error and returns FAIL.  */

static int
add_to_lit_pool (unsigned int nbytes)
{
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  if (nbytes == 8)
    {
      /* Split the 64-bit value into two 32-bit words: imm1 is the low
	 word.  The high word comes from operands[1].reg for a
	 register-pair immediate, is zero for an unsigned expression,
	 and is a sign extension of the low word otherwise.  The words
	 are swapped on big-endian targets.  */
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	      : inst.relocs[0].exp.X_unsigned ? 0
	      : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Re-use an identical constant entry...  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.relocs[0].exp.X_unsigned))
	    break;

	  /* ... or an identical symbolic expression.  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.relocs[0].exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.relocs[0].exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte value is only re-usable if it starts at an 8-byte
	 aligned offset and both 4-byte halves match.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.relocs[0].exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.relocs[0].exp.X_unsigned))
	break;

      /* A padding slot left by a previous 8-byte alignment can accept
	 a new 4-byte literal.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.relocs[0].exp.X_op == O_constant
		|| inst.relocs[0].exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      /* Insert a zero padding slot to reach 8-byte alignment.
		 It is marked PADDING_SLOT so a later 4-byte literal
		 may claim it.  */
	      pool->literals[entry] = inst.relocs[0].exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two halves as consecutive O_constant entries and
	     raise the pool's alignment to 8 bytes.  */
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Claim the padding slot for this 4-byte literal.  */
      pool->literals[entry] = inst.relocs[0].exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's expression to reference the pool slot:
     pool_size is the byte offset of the (first) matching entry.  */
  inst.relocs[0].exp.X_op = O_symbol;
  inst.relocs[0].exp.X_add_number = pool_size;
  inst.relocs[0].exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3573
/* Hook called for a label that appears without a trailing colon.  In
   CodeComposer Studio syntax, while waiting for an .asmfunc name, the
   bare label names the function being defined.  Returns TRUE if the
   label is acceptable.  */
bfd_boolean
tc_start_label_without_colon (void)
{
  bfd_boolean ret = TRUE;

  if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
    {
      const char *label = input_line_pointer;

      /* Back up to the start of the label: the character just past the
	 previous end-of-line.  */
      while (!is_end_of_line[(int) label[-1]])
	--label;

      /* CCS function labels may not start with a dot.  */
      if (*label == '.')
	{
	  as_bad (_("Invalid label '%s'"), label);
	  ret = FALSE;
	}

      /* Record the function start and expect .endasmfunc next.  */
      asmfunc_debug (label);

      asmfunc_state = WAITING_ENDASMFUNC;
    }

  return ret;
}
3599
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Fill in a previously-created symbol with its name, segment, value
   and fragment, then append it to the global symbol chain.  */

static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	    valu,	/* Symbol value.  */
	       fragS *	    frag)	/* Associated fragment.  */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy NAME (including its terminating NUL) into permanent storage
     on the notes obstack.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  /* Give the object format and target back-ends a chance to adjust
     the freshly-located symbol.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
3650
/* Implement the .ltorg directive: emit the pending literal pool for
   the current section/subsection at the present location, then mark
   the pool empty.  Does nothing if there is no non-empty pool.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* The pool is data, so emit a $d mapping symbol at its start.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The \002 byte makes the generated name impossible to collide with
     a user-written symbol.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Bind the pool's placeholder symbol to the current location.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3702
3703 #ifdef OBJ_ELF
3704 /* Forward declarations for functions below, in the MD interface
3705 section. */
3706 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3707 static valueT create_unwind_entry (int);
3708 static void start_unwind_section (const segT, int);
3709 static void add_unwind_opcode (valueT, int);
3710 static void flush_pending_unwind (void);
3711
3712 /* Directives: Data. */
3713
/* Implement .word and friends for ELF (NBYTES is the element size).
   Each comma-separated expression may carry a relocation suffix such
   as (got); the suffix is parsed, excised from the input buffer and
   the expression re-parsed so trailing arithmetic still applies.  */
static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Save the line, slide the pre-suffix text over the
		     suffix, re-parse the now-contiguous expression,
		     then restore the original line text.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Place the fixup at the high-address end of the
		     field when the relocation is narrower than it.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3808
3809 /* Emit an expression containing a 32-bit thumb instruction.
3810 Implementation based on put_thumb32_insn. */
3811
3812 static void
3813 emit_thumb32_expr (expressionS * exp)
3814 {
3815 expressionS exp_high = *exp;
3816
3817 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3818 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3819 exp->X_add_number &= 0xffff;
3820 emit_expr (exp, (unsigned int) THUMB_SIZE);
3821 }
3822
/* Guess the instruction size based on the opcode.  Returns 2 for a
   16-bit encoding, 4 for a 32-bit encoding, and 0 when the width
   cannot be determined from the value alone.  */
static int
thumb_insn_size (int opcode)
{
  unsigned int value = (unsigned int) opcode;

  if (value < 0xe800u)
    return 2;
  if (value >= 0xe8000000u)
    return 4;
  return 0;
}
3835
/* Emit the constant in EXP as an instruction of NBYTES bytes (0 means
   deduce the width from the opcode, Thumb only).  Returns TRUE if an
   instruction was emitted, FALSE after diagnosing an error.  */
static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* .inst with no width suffix: infer 16 vs 32 bits from the
	 opcode value.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT/predication state machine in step with the
		 hand-emitted instruction.  */
	      if (now_pred.state == AUTOMATIC_PRED_BLOCK)
		set_pred_insn_type_nonvoid (OUTSIDE_PRED_INSN, 0);
	      else
		set_pred_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* 32-bit Thumb instructions are stored as two halfwords,
		 high halfword first, on little-endian targets.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3880
3881 /* Like s_arm_elf_cons but do not use md_cons_align and
3882 set the mapping state to MAP_ARM/MAP_THUMB. */
3883
3884 static void
3885 s_arm_elf_inst (int nbytes)
3886 {
3887 if (is_it_end_of_statement ())
3888 {
3889 demand_empty_rest_of_line ();
3890 return;
3891 }
3892
3893 /* Calling mapping_state () here will not change ARM/THUMB,
3894 but will ensure not to be in DATA state. */
3895
3896 if (thumb_mode)
3897 mapping_state (MAP_THUMB);
3898 else
3899 {
3900 if (nbytes != 0)
3901 {
3902 as_bad (_("width suffixes are invalid in ARM mode"));
3903 ignore_rest_of_line ();
3904 return;
3905 }
3906
3907 nbytes = 4;
3908
3909 mapping_state (MAP_ARM);
3910 }
3911
3912 do
3913 {
3914 expressionS exp;
3915
3916 expression (& exp);
3917
3918 if (! emit_insn (& exp, nbytes))
3919 {
3920 ignore_rest_of_line ();
3921 return;
3922 }
3923 }
3924 while (*input_line_pointer++ == ',');
3925
3926 /* Put terminator back into stream. */
3927 input_line_pointer --;
3928 demand_empty_rest_of_line ();
3929 }
3930
3931 /* Parse a .rel31 directive. */
3932
3933 static void
3934 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3935 {
3936 expressionS exp;
3937 char *p;
3938 valueT highbit;
3939
3940 highbit = 0;
3941 if (*input_line_pointer == '1')
3942 highbit = 0x80000000;
3943 else if (*input_line_pointer != '0')
3944 as_bad (_("expected 0 or 1"));
3945
3946 input_line_pointer++;
3947 if (*input_line_pointer != ',')
3948 as_bad (_("missing comma"));
3949 input_line_pointer++;
3950
3951 #ifdef md_flush_pending_output
3952 md_flush_pending_output ();
3953 #endif
3954
3955 #ifdef md_cons_align
3956 md_cons_align (4);
3957 #endif
3958
3959 mapping_state (MAP_DATA);
3960
3961 expression (&exp);
3962
3963 p = frag_more (4);
3964 md_number_to_chars (p, highbit, 4);
3965 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3966 BFD_RELOC_ARM_PREL31);
3967
3968 demand_empty_rest_of_line ();
3969 }
3970
3971 /* Directives: AEABI stack-unwind tables. */
3972
/* Parse an unwind_fnstart directive.  Simply records the current location.  */

static void
s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (unwind.proc_start)
    {
      as_bad (_("duplicate .fnstart directive"));
      return;
    }

  /* Mark the start of the function.  */
  unwind.proc_start = expr_build_dot ();

  /* Reset the rest of the unwind info.  */
  unwind.opcode_count = 0;
  unwind.table_entry = NULL;
  unwind.personality_routine = NULL;
  unwind.personality_index = -1;	/* -1 = no personality chosen yet.  */
  unwind.frame_size = 0;
  unwind.fp_offset = 0;
  unwind.fp_reg = REG_SP;		/* Frame based on SP until told otherwise.  */
  unwind.fp_used = 0;
  unwind.sp_restored = 0;
}
3999
4000
/* Parse a handlerdata directive.  Creates the exception handling table entry
   for the function.  */

static void
s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Only one .handlerdata is allowed per function.  */
  if (unwind.table_entry)
    as_bad (_("duplicate .handlerdata directive"));

  /* Emit the table entry now; the handler data follows it inline.  */
  create_unwind_entry (1);
}
4016
/* Parse an unwind_fnend directive.  Generates the index table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  A non-zero VAL means the entry fits inline in
     the index table's second word; otherwise a separate table entry
     was created and is referenced by relocation below.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      /* A zero-size BFD_RELOC_NONE fixup creates the reference without
	 modifying the section contents.  */
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
4086
4087
/* Parse an unwind_cantunwind directive.  */

static void
s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* A frame that cannot be unwound may not also specify a personality
     routine.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("personality routine specified for cantunwind frame"));

  /* -2 marks the frame as EXIDX_CANTUNWIND.  */
  unwind.personality_index = -2;
}
4102
4103
/* Parse a personalityindex directive.  */

static void
s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Only one personality specification (by index or by name) is
     allowed per frame.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personalityindex directive"));

  expression (&exp);

  /* The index must be a constant in the range 0-15.  */
  if (exp.X_op != O_constant
      || exp.X_add_number < 0 || exp.X_add_number > 15)
    {
      as_bad (_("bad personality routine number"));
      ignore_rest_of_line ();
      return;
    }

  unwind.personality_index = exp.X_add_number;

  demand_empty_rest_of_line ();
}
4131
4132
/* Parse a personality directive.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Only one personality specification (by index or by name) is
     allowed per frame.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* get_symbol_name NUL-terminates the name in place and returns the
     delimiter it overwrote; restore it once the symbol is made.  */
  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  *p = c;
  demand_empty_rest_of_line ();
}
4154
4155
/* Parse a directive saving core registers.  Reads a core register
   list and emits the equivalent EHABI pop opcodes, preferring the
   one-byte short forms where the list allows.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;	/* Bit mask of saved registers: bit N = rN.  */
  int n;

  range = parse_reg_list (&input_line_pointer, REGLIST_RN);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;	/* Swap ip for sp.  */
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4231
4232
4233 /* Parse a directive saving FPA registers. */
4234
4235 static void
4236 s_arm_unwind_save_fpa (int reg)
4237 {
4238 expressionS exp;
4239 int num_regs;
4240 valueT op;
4241
4242 /* Get Number of registers to transfer. */
4243 if (skip_past_comma (&input_line_pointer) != FAIL)
4244 expression (&exp);
4245 else
4246 exp.X_op = O_illegal;
4247
4248 if (exp.X_op != O_constant)
4249 {
4250 as_bad (_("expected , <constant>"));
4251 ignore_rest_of_line ();
4252 return;
4253 }
4254
4255 num_regs = exp.X_add_number;
4256
4257 if (num_regs < 1 || num_regs > 4)
4258 {
4259 as_bad (_("number of registers must be in the range [1:4]"));
4260 ignore_rest_of_line ();
4261 return;
4262 }
4263
4264 demand_empty_rest_of_line ();
4265
4266 if (reg == 4)
4267 {
4268 /* Short form. */
4269 op = 0xb4 | (num_regs - 1);
4270 add_unwind_opcode (op, 1);
4271 }
4272 else
4273 {
4274 /* Long form. */
4275 op = 0xc800 | (reg << 4) | (num_regs - 1);
4276 add_unwind_opcode (op, 2);
4277 }
4278 unwind.frame_size += num_regs * 12;
4279 }
4280
4281
/* Parse a directive saving VFP registers for ARMv6 and above.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;	/* First D register in the list.  */
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;
  bfd_boolean partial_match;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D,
			      &partial_match);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* The opcode encodes d16-relative numbering; a list straddling
	 d16 starts its upper part exactly at d16 (offset 0).  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies 8 bytes.  */
  unwind.frame_size += count * 8;
}
4332
4333
4334 /* Parse a directive saving VFP registers for pre-ARMv6. */
4335
4336 static void
4337 s_arm_unwind_save_vfp (void)
4338 {
4339 int count;
4340 unsigned int reg;
4341 valueT op;
4342 bfd_boolean partial_match;
4343
4344 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D,
4345 &partial_match);
4346 if (count == FAIL)
4347 {
4348 as_bad (_("expected register list"));
4349 ignore_rest_of_line ();
4350 return;
4351 }
4352
4353 demand_empty_rest_of_line ();
4354
4355 if (reg == 8)
4356 {
4357 /* Short form. */
4358 op = 0xb8 | (count - 1);
4359 add_unwind_opcode (op, 1);
4360 }
4361 else
4362 {
4363 /* Long form. */
4364 op = 0xb300 | (reg << 4) | (count - 1);
4365 add_unwind_opcode (op, 2);
4366 }
4367 unwind.frame_size += count * 8 + 4;
4368 }
4369
4370
/* Parse a directive saving iWMMXt data registers.  Parses a wR
   register list (single registers and ranges), tries to merge it with
   the previously emitted save opcode, then emits save opcodes for the
   contiguous blocks in descending register order.  */

static void
s_arm_unwind_save_mmxwr (void)
{
  int reg;
  int hi_reg;
  int i;
  unsigned mask = 0;	/* Bit N set = wRN saved.  */
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	  goto error;
	}

      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      /* Handle a wRx-wRy range.  */
      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.  */
  flush_pending_unwind ();

  /* Each wR register occupies 8 bytes.  */
  for (i = 0; i < 16; i++)
    {
      if (mask & (1 << i))
	unwind.frame_size += 8;
    }

  /* Attempt to combine with a previous opcode.  We do this because gcc
     likes to output separate unwind directives for a single block of
     registers.  */
  if (unwind.opcode_count > 0)
    {
      i = unwind.opcodes[unwind.opcode_count - 1];
      if ((i & 0xf8) == 0xc0)
	{
	  i &= 7;
	  /* Only merge if the blocks are contiguous.  */
	  if (i < 6)
	    {
	      /* Previous opcode was the short form saving wR10..wR(10+i);
		 absorb it when the new list is exactly {wR9}.  */
	      if ((mask & 0xfe00) == (1 << 9))
		{
		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
		  unwind.opcode_count--;
		}
	    }
	  else if (i == 6 && unwind.opcode_count >= 2)
	    {
	      /* Previous opcode was the two-byte long form; its start
		 register and count live in the preceding byte.  */
	      i = unwind.opcodes[unwind.opcode_count - 2];
	      reg = i >> 4;
	      i &= 0xf;

	      /* Merge when the new list ends exactly one register
		 below the old block's start.  */
	      op = 0xffff << (reg - 1);
	      if (reg > 0
		  && ((mask & op) == (1u << (reg - 1))))
		{
		  op = (1 << (reg + i + 1)) - 1;
		  op &= ~((1 << reg) - 1);
		  mask |= op;
		  unwind.opcode_count -= 2;
		}
	    }
	}
    }

  hi_reg = 15;
  /* We want to generate opcodes in the order the registers have been
     saved, ie. descending order.  */
  for (reg = 15; reg >= -1; reg--)
    {
      /* Save registers in blocks.  */
      if (reg < 0
	  || !(mask & (1 << reg)))
	{
	  /* We found an unsaved reg.  Generate opcodes to save the
	     preceding block.  */
	  if (reg != hi_reg)
	    {
	      if (reg == 9)
		{
		  /* Short form.  */
		  op = 0xc0 | (hi_reg - 10);
		  add_unwind_opcode (op, 1);
		}
	      else
		{
		  /* Long form.  */
		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
		  add_unwind_opcode (op, 2);
		}
	    }
	  hi_reg = reg - 1;
	}
    }

  return;
 error:
  ignore_rest_of_line ();
}
4504
4505 static void
4506 s_arm_unwind_save_mmxwcg (void)
4507 {
4508 int reg;
4509 int hi_reg;
4510 unsigned mask = 0;
4511 valueT op;
4512
4513 if (*input_line_pointer == '{')
4514 input_line_pointer++;
4515
4516 skip_whitespace (input_line_pointer);
4517
4518 do
4519 {
4520 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4521
4522 if (reg == FAIL)
4523 {
4524 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4525 goto error;
4526 }
4527
4528 reg -= 8;
4529 if (mask >> reg)
4530 as_tsktsk (_("register list not in ascending order"));
4531 mask |= 1 << reg;
4532
4533 if (*input_line_pointer == '-')
4534 {
4535 input_line_pointer++;
4536 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4537 if (hi_reg == FAIL)
4538 {
4539 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4540 goto error;
4541 }
4542 else if (reg >= hi_reg)
4543 {
4544 as_bad (_("bad register range"));
4545 goto error;
4546 }
4547 for (; reg < hi_reg; reg++)
4548 mask |= 1 << reg;
4549 }
4550 }
4551 while (skip_past_comma (&input_line_pointer) != FAIL);
4552
4553 skip_past_char (&input_line_pointer, '}');
4554
4555 demand_empty_rest_of_line ();
4556
4557 /* Generate any deferred opcodes because we're going to be looking at
4558 the list. */
4559 flush_pending_unwind ();
4560
4561 for (reg = 0; reg < 16; reg++)
4562 {
4563 if (mask & (1 << reg))
4564 unwind.frame_size += 4;
4565 }
4566 op = 0xc700 | mask;
4567 add_unwind_opcode (op, 2);
4568 return;
4569 error:
4570 ignore_rest_of_line ();
4571 }
4572
4573
4574 /* Parse an unwind_save directive.
4575 If the argument is non-zero, this is a .vsave directive. */
4576
4577 static void
4578 s_arm_unwind_save (int arch_v6)
4579 {
4580 char *peek;
4581 struct reg_entry *reg;
4582 bfd_boolean had_brace = FALSE;
4583
4584 if (!unwind.proc_start)
4585 as_bad (MISSING_FNSTART);
4586
4587 /* Figure out what sort of save we have. */
4588 peek = input_line_pointer;
4589
4590 if (*peek == '{')
4591 {
4592 had_brace = TRUE;
4593 peek++;
4594 }
4595
4596 reg = arm_reg_parse_multi (&peek);
4597
4598 if (!reg)
4599 {
4600 as_bad (_("register expected"));
4601 ignore_rest_of_line ();
4602 return;
4603 }
4604
4605 switch (reg->type)
4606 {
4607 case REG_TYPE_FN:
4608 if (had_brace)
4609 {
4610 as_bad (_("FPA .unwind_save does not take a register list"));
4611 ignore_rest_of_line ();
4612 return;
4613 }
4614 input_line_pointer = peek;
4615 s_arm_unwind_save_fpa (reg->number);
4616 return;
4617
4618 case REG_TYPE_RN:
4619 s_arm_unwind_save_core ();
4620 return;
4621
4622 case REG_TYPE_VFD:
4623 if (arch_v6)
4624 s_arm_unwind_save_vfp_armv6 ();
4625 else
4626 s_arm_unwind_save_vfp ();
4627 return;
4628
4629 case REG_TYPE_MMXWR:
4630 s_arm_unwind_save_mmxwr ();
4631 return;
4632
4633 case REG_TYPE_MMXWCG:
4634 s_arm_unwind_save_mmxwcg ();
4635 return;
4636
4637 default:
4638 as_bad (_(".unwind_save does not support this kind of register"));
4639 ignore_rest_of_line ();
4640 }
4641 }
4642
4643
4644 /* Parse an unwind_movsp directive. */
4645
4646 static void
4647 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4648 {
4649 int reg;
4650 valueT op;
4651 int offset;
4652
4653 if (!unwind.proc_start)
4654 as_bad (MISSING_FNSTART);
4655
4656 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4657 if (reg == FAIL)
4658 {
4659 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4660 ignore_rest_of_line ();
4661 return;
4662 }
4663
4664 /* Optional constant. */
4665 if (skip_past_comma (&input_line_pointer) != FAIL)
4666 {
4667 if (immediate_for_directive (&offset) == FAIL)
4668 return;
4669 }
4670 else
4671 offset = 0;
4672
4673 demand_empty_rest_of_line ();
4674
4675 if (reg == REG_SP || reg == REG_PC)
4676 {
4677 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4678 return;
4679 }
4680
4681 if (unwind.fp_reg != REG_SP)
4682 as_bad (_("unexpected .unwind_movsp directive"));
4683
4684 /* Generate opcode to restore the value. */
4685 op = 0x90 | reg;
4686 add_unwind_opcode (op, 1);
4687
4688 /* Record the information for later. */
4689 unwind.fp_reg = reg;
4690 unwind.fp_offset = unwind.frame_size - offset;
4691 unwind.sp_restored = 1;
4692 }
4693
4694 /* Parse an unwind_pad directive. */
4695
4696 static void
4697 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4698 {
4699 int offset;
4700
4701 if (!unwind.proc_start)
4702 as_bad (MISSING_FNSTART);
4703
4704 if (immediate_for_directive (&offset) == FAIL)
4705 return;
4706
4707 if (offset & 3)
4708 {
4709 as_bad (_("stack increment must be multiple of 4"));
4710 ignore_rest_of_line ();
4711 return;
4712 }
4713
4714 /* Don't generate any opcodes, just record the details for later. */
4715 unwind.frame_size += offset;
4716 unwind.pending_offset += offset;
4717
4718 demand_empty_rest_of_line ();
4719 }
4720
4721 /* Parse an unwind_setfp directive. */
4722
4723 static void
4724 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4725 {
4726 int sp_reg;
4727 int fp_reg;
4728 int offset;
4729
4730 if (!unwind.proc_start)
4731 as_bad (MISSING_FNSTART);
4732
4733 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4734 if (skip_past_comma (&input_line_pointer) == FAIL)
4735 sp_reg = FAIL;
4736 else
4737 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4738
4739 if (fp_reg == FAIL || sp_reg == FAIL)
4740 {
4741 as_bad (_("expected <reg>, <reg>"));
4742 ignore_rest_of_line ();
4743 return;
4744 }
4745
4746 /* Optional constant. */
4747 if (skip_past_comma (&input_line_pointer) != FAIL)
4748 {
4749 if (immediate_for_directive (&offset) == FAIL)
4750 return;
4751 }
4752 else
4753 offset = 0;
4754
4755 demand_empty_rest_of_line ();
4756
4757 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4758 {
4759 as_bad (_("register must be either sp or set by a previous"
4760 "unwind_movsp directive"));
4761 return;
4762 }
4763
4764 /* Don't generate any opcodes, just record the information for later. */
4765 unwind.fp_reg = fp_reg;
4766 unwind.fp_used = 1;
4767 if (sp_reg == REG_SP)
4768 unwind.fp_offset = unwind.frame_size - offset;
4769 else
4770 unwind.fp_offset -= offset;
4771 }
4772
4773 /* Parse an unwind_raw directive. */
4774
4775 static void
4776 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4777 {
4778 expressionS exp;
4779 /* This is an arbitrary limit. */
4780 unsigned char op[16];
4781 int count;
4782
4783 if (!unwind.proc_start)
4784 as_bad (MISSING_FNSTART);
4785
4786 expression (&exp);
4787 if (exp.X_op == O_constant
4788 && skip_past_comma (&input_line_pointer) != FAIL)
4789 {
4790 unwind.frame_size += exp.X_add_number;
4791 expression (&exp);
4792 }
4793 else
4794 exp.X_op = O_illegal;
4795
4796 if (exp.X_op != O_constant)
4797 {
4798 as_bad (_("expected <offset>, <opcode>"));
4799 ignore_rest_of_line ();
4800 return;
4801 }
4802
4803 count = 0;
4804
4805 /* Parse the opcode. */
4806 for (;;)
4807 {
4808 if (count >= 16)
4809 {
4810 as_bad (_("unwind opcode too long"));
4811 ignore_rest_of_line ();
4812 }
4813 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4814 {
4815 as_bad (_("invalid unwind opcode"));
4816 ignore_rest_of_line ();
4817 return;
4818 }
4819 op[count++] = exp.X_add_number;
4820
4821 /* Parse the next byte. */
4822 if (skip_past_comma (&input_line_pointer) == FAIL)
4823 break;
4824
4825 expression (&exp);
4826 }
4827
4828 /* Add the opcode bytes in reverse order. */
4829 while (count--)
4830 add_unwind_opcode (op[count], 1);
4831
4832 demand_empty_rest_of_line ();
4833 }
4834
4835
4836 /* Parse a .eabi_attribute directive. */
4837
4838 static void
4839 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4840 {
4841 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4842
4843 if (tag >= 0 && tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4844 attributes_set_explicitly[tag] = 1;
4845 }
4846
/* Emit a tls fix for the symbol.  Parses the .tlsdescseq directive's
   expression and attaches a 4-byte TLS descriptor-sequence relocation
   (Thumb or ARM flavour depending on the current mode) at the current
   output position.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* P is the current end of the frag's obstack, i.e. the address where
     the next byte would be emitted; subtracting fr_literal gives the
     fixup's offset within the frag.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4870 #endif /* OBJ_ELF */
4871
4872 static void s_arm_arch (int);
4873 static void s_arm_object_arch (int);
4874 static void s_arm_cpu (int);
4875 static void s_arm_fpu (int);
4876 static void s_arm_arch_extension (int);
4877
4878 #ifdef TE_PE
4879
4880 static void
4881 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4882 {
4883 expressionS exp;
4884
4885 do
4886 {
4887 expression (&exp);
4888 if (exp.X_op == O_symbol)
4889 exp.X_op = O_secrel;
4890
4891 emit_expr (&exp, 4);
4892 }
4893 while (*input_line_pointer++ == ',');
4894
4895 input_line_pointer--;
4896 demand_empty_rest_of_line ();
4897 }
4898 #endif /* TE_PE */
4899
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req", s_req, 0 },
  /* Following two are likewise never called.  */
  { "dn", s_dn, 0 },
  { "qn", s_qn, 0 },
  { "unreq", s_unreq, 0 },
  { "bss", s_bss, 0 },
  { "align", s_align_ptwo, 2 },
  { "arm", s_arm, 0 },
  { "thumb", s_thumb, 0 },
  { "code", s_code, 0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func", s_thumb_func, 0 },
  { "thumb_set", s_thumb_set, 0 },
  { "even", s_even, 0 },
  { "ltorg", s_ltorg, 0 },
  { "pool", s_ltorg, 0 },
  { "syntax", s_syntax, 0 },
  { "cpu", s_arm_cpu, 0 },
  { "arch", s_arm_arch, 0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu", s_arm_fpu, 0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word", s_arm_elf_cons, 4 },
  { "long", s_arm_elf_cons, 4 },
  { "inst.n", s_arm_elf_inst, 2 },
  { "inst.w", s_arm_elf_inst, 4 },
  { "inst", s_arm_elf_inst, 0 },
  { "rel31", s_arm_rel31, 0 },
  /* ARM EHABI unwind-table directives.  */
  { "fnstart", s_arm_unwind_fnstart, 0 },
  { "fnend", s_arm_unwind_fnend, 0 },
  { "cantunwind", s_arm_unwind_cantunwind, 0 },
  { "personality", s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata", s_arm_unwind_handlerdata, 0 },
  { "save", s_arm_unwind_save, 0 },
  { "vsave", s_arm_unwind_save, 1 },
  { "movsp", s_arm_unwind_movsp, 0 },
  { "pad", s_arm_unwind_pad, 0 },
  { "setfp", s_arm_unwind_setfp, 0 },
  { "unwind_raw", s_arm_unwind_raw, 0 },
  { "eabi_attribute", s_arm_eabi_attribute, 0 },
  { "tlsdescseq", s_arm_tls_descseq, 0 },
#else
  { "word", cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", dwarf2_directive_file, 0 },
  { "loc", dwarf2_directive_loc, 0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend", float_cons, 'x' },
  { "ldouble", float_cons, 'x' },
  { "packed", float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref", s_ccs_ref, 0},
  {"def", s_ccs_def, 0},
  {"asmfunc", s_ccs_asmfunc, 0},
  {"endasmfunc", s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4979 \f
4980 /* Parser functions used exclusively in instruction operands. */
4981
4982 /* Generic immediate-value read function for use in insn parsing.
4983 STR points to the beginning of the immediate (the leading #);
4984 VAL receives the value; if the value is outside [MIN, MAX]
4985 issue an error. PREFIX_OPT is true if the immediate prefix is
4986 optional. */
4987
4988 static int
4989 parse_immediate (char **str, int *val, int min, int max,
4990 bfd_boolean prefix_opt)
4991 {
4992 expressionS exp;
4993
4994 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4995 if (exp.X_op != O_constant)
4996 {
4997 inst.error = _("constant expression required");
4998 return FAIL;
4999 }
5000
5001 if (exp.X_add_number < min || exp.X_add_number > max)
5002 {
5003 inst.error = _("immediate value out of range");
5004 return FAIL;
5005 }
5006
5007 *val = exp.X_add_number;
5008 return SUCCESS;
5009 }
5010
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low 32
   bits go in .imm, the high 32 bits (if any) in .reg with .regisimm set.
   If IN_EXP is non-NULL the parsed expression is also stored there.
   ALLOW_SYMBOL_P permits a bare symbol reference to succeed.
   Returns SUCCESS or FAIL.  */

static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* For O_big, X_add_number is the count of littlenums in use.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Pack the low 32 bits into .imm ...  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      /* ... and the next 32 bits into .reg.  */
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
5083
/* Returns the pseudo-register number of an FPA immediate constant,
   or FAIL if there isn't a valid constant here.  Constant I in the
   fp_const / fp_values tables is reported as pseudo-register I + 8.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char * save_in;
  expressionS exp;
  int i;
  int j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Not a full match after all; restore and keep looking.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA value.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  /* Restore input_line_pointer before returning.  */
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
5176
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000, i.e. is representable as a
   VMOV-style 8-bit floating-point immediate.  */

static int
is_quarter_float (unsigned imm)
{
  /* Bits 25-30 must be the complement-extension of the exponent's
     top bit (bit 29): 0b011111x or 0b100000x.  */
  unsigned expected_high = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;
  return (imm & 0x7e000000) == expected_high;
}
5186
5187
/* Detect the presence of a floating point or integer zero constant,
   i.e. #0.0 or #0.  Returns TRUE and advances *IN past the constant on
   a match; otherwise returns FALSE (possibly with *IN partially
   advanced by the scan).  */

static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    {
      /* In unified syntax, all prefixes are optional.  */
      if (!unified_syntax)
	return FALSE;
    }
  else
    ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      int val;
      /* min == max == 0, so only the value zero can parse.  */
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
	return FALSE;
      return TRUE;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
			     &generic_floating_point_number);

  /* NOTE(review): low > leader appears to mean the significand holds no
     significant littlenums, i.e. the parsed value is +0.0 — confirm
     against atof_generic's contract before relying on this reading.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
	  > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
5225
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.  On success the single-precision bit
   pattern is stored in *IMMED and *CCP is advanced; returns SUCCESS or
   FAIL.  */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan up to the end of the token for a '.' or exponent marker.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept encodable quarter-floats, plus +0.0/-0.0 (which clear
	 all bits except possibly the sign).  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5289
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX, SHIFT_UXTW
};

/* Maps a shift mnemonic, as written in the source, to its kind; looked
   up via the arm_shift_hsh hash table in parse_shift.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
  SHIFT_UXTW_IMMEDIATE		/* Shift must be UXTW immediate.  */
};
5312
5313 /* Parse a <shift> specifier on an ARM data processing instruction.
5314 This has three forms:
5315
5316 (LSL|LSR|ASL|ASR|ROR) Rs
5317 (LSL|LSR|ASL|ASR|ROR) #imm
5318 RRX
5319
5320 Note that ASL is assimilated to LSL in the instruction encoding, and
5321 RRX to ROR #0 (which cannot be written as such). */
5322
5323 static int
5324 parse_shift (char **str, int i, enum parse_shift_mode mode)
5325 {
5326 const struct asm_shift_name *shift_name;
5327 enum shift_kind shift;
5328 char *s = *str;
5329 char *p = s;
5330 int reg;
5331
5332 for (p = *str; ISALPHA (*p); p++)
5333 ;
5334
5335 if (p == *str)
5336 {
5337 inst.error = _("shift expression expected");
5338 return FAIL;
5339 }
5340
5341 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5342 p - *str);
5343
5344 if (shift_name == NULL)
5345 {
5346 inst.error = _("shift expression expected");
5347 return FAIL;
5348 }
5349
5350 shift = shift_name->kind;
5351
5352 switch (mode)
5353 {
5354 case NO_SHIFT_RESTRICT:
5355 case SHIFT_IMMEDIATE:
5356 if (shift == SHIFT_UXTW)
5357 {
5358 inst.error = _("'UXTW' not allowed here");
5359 return FAIL;
5360 }
5361 break;
5362
5363 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5364 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5365 {
5366 inst.error = _("'LSL' or 'ASR' required");
5367 return FAIL;
5368 }
5369 break;
5370
5371 case SHIFT_LSL_IMMEDIATE:
5372 if (shift != SHIFT_LSL)
5373 {
5374 inst.error = _("'LSL' required");
5375 return FAIL;
5376 }
5377 break;
5378
5379 case SHIFT_ASR_IMMEDIATE:
5380 if (shift != SHIFT_ASR)
5381 {
5382 inst.error = _("'ASR' required");
5383 return FAIL;
5384 }
5385 break;
5386 case SHIFT_UXTW_IMMEDIATE:
5387 if (shift != SHIFT_UXTW)
5388 {
5389 inst.error = _("'UXTW' required");
5390 return FAIL;
5391 }
5392 break;
5393
5394 default: abort ();
5395 }
5396
5397 if (shift != SHIFT_RRX)
5398 {
5399 /* Whitespace can appear here if the next thing is a bare digit. */
5400 skip_whitespace (p);
5401
5402 if (mode == NO_SHIFT_RESTRICT
5403 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5404 {
5405 inst.operands[i].imm = reg;
5406 inst.operands[i].immisreg = 1;
5407 }
5408 else if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
5409 return FAIL;
5410 }
5411 inst.operands[i].shift_kind = shift;
5412 inst.operands[i].shifted = 1;
5413 *str = p;
5414 return SUCCESS;
5415 }
5416
5417 /* Parse a <shifter_operand> for an ARM data processing instruction:
5418
5419 #<immediate>
5420 #<immediate>, <rotate>
5421 <Rm>
5422 <Rm>, <shift>
5423
5424 where <shift> is defined by parse_shift above, and <rotate> is a
5425 multiple of 2 between 0 and 30. Validation of immediate operands
5426 is deferred to md_apply_fix. */
5427
5428 static int
5429 parse_shifter_operand (char **str, int i)
5430 {
5431 int value;
5432 expressionS exp;
5433
5434 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
5435 {
5436 inst.operands[i].reg = value;
5437 inst.operands[i].isreg = 1;
5438
5439 /* parse_shift will override this if appropriate */
5440 inst.relocs[0].exp.X_op = O_constant;
5441 inst.relocs[0].exp.X_add_number = 0;
5442
5443 if (skip_past_comma (str) == FAIL)
5444 return SUCCESS;
5445
5446 /* Shift operation on register. */
5447 return parse_shift (str, i, NO_SHIFT_RESTRICT);
5448 }
5449
5450 if (my_get_expression (&inst.relocs[0].exp, str, GE_IMM_PREFIX))
5451 return FAIL;
5452
5453 if (skip_past_comma (str) == SUCCESS)
5454 {
5455 /* #x, y -- ie explicit rotation by Y. */
5456 if (my_get_expression (&exp, str, GE_NO_PREFIX))
5457 return FAIL;
5458
5459 if (exp.X_op != O_constant || inst.relocs[0].exp.X_op != O_constant)
5460 {
5461 inst.error = _("constant expression expected");
5462 return FAIL;
5463 }
5464
5465 value = exp.X_add_number;
5466 if (value < 0 || value > 30 || value % 2 != 0)
5467 {
5468 inst.error = _("invalid rotation");
5469 return FAIL;
5470 }
5471 if (inst.relocs[0].exp.X_add_number < 0
5472 || inst.relocs[0].exp.X_add_number > 255)
5473 {
5474 inst.error = _("invalid constant");
5475 return FAIL;
5476 }
5477
5478 /* Encode as specified. */
5479 inst.operands[i].imm = inst.relocs[0].exp.X_add_number | value << 7;
5480 return SUCCESS;
5481 }
5482
5483 inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
5484 inst.relocs[0].pc_rel = 0;
5485 return SUCCESS;
5486 }
5487
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  */

struct group_reloc_table_entry
{
  const char *name;
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC,
  GROUP_MVE
} group_reloc_type;

/* A code of 0 means the relocation variety does not exist for that
   entry.  */
static struct group_reloc_table_entry group_reloc_table[] =
{ /* Program counter relative: */
  { "pc_g0_nc",
    BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
    0,				/* LDR */
    0,				/* LDRS */
    0 },			/* LDC */
  { "pc_g0",
    BFD_RELOC_ARM_ALU_PC_G0,	/* ALU */
    BFD_RELOC_ARM_LDR_PC_G0,	/* LDR */
    BFD_RELOC_ARM_LDRS_PC_G0,	/* LDRS */
    BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
  { "pc_g1_nc",
    BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
    0,				/* LDR */
    0,				/* LDRS */
    0 },			/* LDC */
  { "pc_g1",
    BFD_RELOC_ARM_ALU_PC_G1,	/* ALU */
    BFD_RELOC_ARM_LDR_PC_G1,	/* LDR */
    BFD_RELOC_ARM_LDRS_PC_G1,	/* LDRS */
    BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
  { "pc_g2",
    BFD_RELOC_ARM_ALU_PC_G2,	/* ALU */
    BFD_RELOC_ARM_LDR_PC_G2,	/* LDR */
    BFD_RELOC_ARM_LDRS_PC_G2,	/* LDRS */
    BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
  /* Section base relative */
  { "sb_g0_nc",
    BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
    0,				/* LDR */
    0,				/* LDRS */
    0 },			/* LDC */
  { "sb_g0",
    BFD_RELOC_ARM_ALU_SB_G0,	/* ALU */
    BFD_RELOC_ARM_LDR_SB_G0,	/* LDR */
    BFD_RELOC_ARM_LDRS_SB_G0,	/* LDRS */
    BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
  { "sb_g1_nc",
    BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
    0,				/* LDR */
    0,				/* LDRS */
    0 },			/* LDC */
  { "sb_g1",
    BFD_RELOC_ARM_ALU_SB_G1,	/* ALU */
    BFD_RELOC_ARM_LDR_SB_G1,	/* LDR */
    BFD_RELOC_ARM_LDRS_SB_G1,	/* LDRS */
    BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
  { "sb_g2",
    BFD_RELOC_ARM_ALU_SB_G2,	/* ALU */
    BFD_RELOC_ARM_LDR_SB_G2,	/* LDR */
    BFD_RELOC_ARM_LDRS_SB_G2,	/* LDRS */
    BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
  /* Absolute thumb alu relocations.  */
  { "lower0_7",
    BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
    0,				/* LDR.  */
    0,				/* LDRS.  */
    0 },			/* LDC.  */
  { "lower8_15",
    BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
    0,				/* LDR.  */
    0,				/* LDRS.  */
    0 },			/* LDC.  */
  { "upper0_7",
    BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
    0,				/* LDR.  */
    0,				/* LDRS.  */
    0 },			/* LDC.  */
  { "upper8_15",
    BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
    0,				/* LDR.  */
    0,				/* LDRS.  */
    0 } };			/* LDC.  */
5588
5589 /* Given the address of a pointer pointing to the textual name of a group
5590 relocation as may appear in assembler source, attempt to find its details
5591 in group_reloc_table. The pointer will be updated to the character after
5592 the trailing colon. On failure, FAIL will be returned; SUCCESS
5593 otherwise. On success, *entry will be updated to point at the relevant
5594 group_reloc_table entry. */
5595
5596 static int
5597 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5598 {
5599 unsigned int i;
5600 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5601 {
5602 int length = strlen (group_reloc_table[i].name);
5603
5604 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5605 && (*str)[length] == ':')
5606 {
5607 *out = &group_reloc_table[i];
5608 *str += (length + 1);
5609 return SUCCESS;
5610 }
5611 }
5612
5613 return FAIL;
5614 }
5615
5616 /* Parse a <shifter_operand> for an ARM data processing instruction
5617 (as for parse_shifter_operand) where group relocations are allowed:
5618
5619 #<immediate>
5620 #<immediate>, <rotate>
5621 #:<group_reloc>:<expression>
5622 <Rm>
5623 <Rm>, <shift>
5624
5625 where <group_reloc> is one of the strings defined in group_reloc_table.
5626 The hashes are optional.
5627
5628 Everything else is as for parse_shifter_operand. */
5629
5630 static parse_operand_result
5631 parse_shifter_operand_group_reloc (char **str, int i)
5632 {
5633 /* Determine if we have the sequence of characters #: or just :
5634 coming next. If we do, then we check for a group relocation.
5635 If we don't, punt the whole lot to parse_shifter_operand. */
5636
5637 if (((*str)[0] == '#' && (*str)[1] == ':')
5638 || (*str)[0] == ':')
5639 {
5640 struct group_reloc_table_entry *entry;
5641
5642 if ((*str)[0] == '#')
5643 (*str) += 2;
5644 else
5645 (*str)++;
5646
5647 /* Try to parse a group relocation. Anything else is an error. */
5648 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5649 {
5650 inst.error = _("unknown group relocation");
5651 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5652 }
5653
5654 /* We now have the group relocation table entry corresponding to
5655 the name in the assembler source. Next, we parse the expression. */
5656 if (my_get_expression (&inst.relocs[0].exp, str, GE_NO_PREFIX))
5657 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5658
5659 /* Record the relocation type (always the ALU variant here). */
5660 inst.relocs[0].type = (bfd_reloc_code_real_type) entry->alu_code;
5661 gas_assert (inst.relocs[0].type != 0);
5662
5663 return PARSE_OPERAND_SUCCESS;
5664 }
5665 else
5666 return parse_shifter_operand (str, i) == SUCCESS
5667 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5668
5669 /* Never reached. */
5670 }
5671
5672 /* Parse a Neon alignment expression. Information is written to
5673 inst.operands[i]. We assume the initial ':' has been skipped.
5674
5675 align .imm = align << 8, .immisalign=1, .preind=0 */
5676 static parse_operand_result
5677 parse_neon_alignment (char **str, int i)
5678 {
5679 char *p = *str;
5680 expressionS exp;
5681
5682 my_get_expression (&exp, &p, GE_NO_PREFIX);
5683
5684 if (exp.X_op != O_constant)
5685 {
5686 inst.error = _("alignment must be constant");
5687 return PARSE_OPERAND_FAIL;
5688 }
5689
5690 inst.operands[i].imm = exp.X_add_number << 8;
5691 inst.operands[i].immisalign = 1;
5692 /* Alignments are not pre-indexes. */
5693 inst.operands[i].preind = 0;
5694
5695 *str = p;
5696 return PARSE_OPERAND_SUCCESS;
5697 }
5698
5699 /* Parse all forms of an ARM address expression. Information is written
5700 to inst.operands[i] and/or inst.relocs[0].
5701
5702 Preindexed addressing (.preind=1):
5703
5704 [Rn, #offset] .reg=Rn .relocs[0].exp=offset
5705 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5706 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5707 .shift_kind=shift .relocs[0].exp=shift_imm
5708
5709 These three may have a trailing ! which causes .writeback to be set also.
5710
5711 Postindexed addressing (.postind=1, .writeback=1):
5712
5713 [Rn], #offset .reg=Rn .relocs[0].exp=offset
5714 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5715 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5716 .shift_kind=shift .relocs[0].exp=shift_imm
5717
5718 Unindexed addressing (.preind=0, .postind=0):
5719
5720 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5721
5722 Other:
5723
5724 [Rn]{!} shorthand for [Rn,#0]{!}
5725 =immediate .isreg=0 .relocs[0].exp=immediate
5726 label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5727
5728 It is the caller's responsibility to check for addressing modes not
5729 supported by the instruction, and to set inst.relocs[0].type. */
5730
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No opening '[': either "=immediate" or a bare label/address.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.relocs[0].pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.relocs[0].exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.relocs[0].exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* Parse the base register.  For MVE a Q register is also accepted as
     the base (e.g. scatter/gather forms).  */
  if (group_type == GROUP_MVE)
    {
      enum arm_reg_type rtype = REG_TYPE_MQ;
      struct neon_type_el et;
      if ((reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	{
	  inst.operands[i].isquad = 1;
	}
      else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
	{
	  inst.error = BAD_ADDR_MODE;
	  return PARSE_OPERAND_FAIL;
	}
    }
  else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      /* NOTE(review): this inner GROUP_MVE test is unreachable — the
	 GROUP_MVE case is fully handled by the branch above.  */
      if (group_type == GROUP_MVE)
	inst.error = BAD_ADDR_MODE;
      else
	inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* A comma inside the brackets introduces a pre-indexed offset:
     register (possibly shifted), Neon alignment, group relocation, or
     immediate expression.  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      enum arm_reg_type rtype = REG_TYPE_MQ;
      struct neon_type_el et;
      /* MVE: [Rn, Qm {, UXTW #imm}] — .immisreg == 2 marks a Q-register
	 offset; the UXTW shift amount is packed into .imm bits [5+].  */
      if (group_type == GROUP_MVE
	  && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	{
	  inst.operands[i].immisreg = 2;
	  inst.operands[i].imm = reg;

	  if (skip_past_comma (&p) == SUCCESS)
	    {
	      if (parse_shift (&p, i, SHIFT_UXTW_IMMEDIATE) == SUCCESS)
		{
		  inst.operands[i].imm |= inst.relocs[0].exp.X_add_number << 5;
		  inst.relocs[0].exp.X_add_number = 0;
		}
	      else
		return PARSE_OPERAND_FAIL;
	    }
	}
      else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [Rn, +/-Rm {, shift}]  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here.  This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* Not a register offset: undo the sign consumption so the
	     expression parser sees the '-' itself.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type; which table column applies
		 depends on the instruction class the caller passed in.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero entry in the table means the relocation name is
		 not valid for this instruction class.  */
	      if (inst.relocs[0].type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;

	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here.  This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  /* Trailing '!' selects writeback; a trailing comma introduces the
     post-indexed or unindexed forms.  */
  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  enum arm_reg_type rtype = REG_TYPE_MQ;
	  struct neon_type_el et;
	  /* MVE: post-indexed Q-register offset, [Rn], Qm.  */
	  if (group_type == GROUP_MVE
	      && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	    {
	      inst.operands[i].immisreg = 2;
	      inst.operands[i].imm = reg;
	    }
	  else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already.  If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;

	      /* As above: rewind over a consumed '-' so the expression
		 parser handles the sign.  */
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
6031
6032 static int
6033 parse_address (char **str, int i)
6034 {
6035 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
6036 ? SUCCESS : FAIL;
6037 }
6038
6039 static parse_operand_result
6040 parse_address_group_reloc (char **str, int i, group_reloc_type type)
6041 {
6042 return parse_address_main (str, i, 1, type);
6043 }
6044
6045 /* Parse an operand for a MOVW or MOVT instruction. */
6046 static int
6047 parse_half (char **str)
6048 {
6049 char * p;
6050
6051 p = *str;
6052 skip_past_char (&p, '#');
6053 if (strncasecmp (p, ":lower16:", 9) == 0)
6054 inst.relocs[0].type = BFD_RELOC_ARM_MOVW;
6055 else if (strncasecmp (p, ":upper16:", 9) == 0)
6056 inst.relocs[0].type = BFD_RELOC_ARM_MOVT;
6057
6058 if (inst.relocs[0].type != BFD_RELOC_UNUSED)
6059 {
6060 p += 9;
6061 skip_whitespace (p);
6062 }
6063
6064 if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
6065 return FAIL;
6066
6067 if (inst.relocs[0].type == BFD_RELOC_UNUSED)
6068 {
6069 if (inst.relocs[0].exp.X_op != O_constant)
6070 {
6071 inst.error = _("constant expression expected");
6072 return FAIL;
6073 }
6074 if (inst.relocs[0].exp.X_add_number < 0
6075 || inst.relocs[0].exp.X_add_number > 0xffff)
6076 {
6077 inst.error = _("immediate value out of range");
6078 return FAIL;
6079 }
6080 }
6081 *str = p;
6082 return SUCCESS;
6083 }
6084
6085 /* Miscellaneous. */
6086
6087 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
6088 or a bitmask suitable to be or-ed into the ARM msr instruction. */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698: If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile special registers are looked up by name in
	 arm_v7m_psr_hsh.  Collect the identifier first.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For names ending in "psr" a suffix may follow; clip the lookup
	 key at the first 'r'/'R' so "iapsr_nzcvq" looks up "iapsr".  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the four characters of SPSR/CPSR/APSR.  */
  p += 4;
check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  Each of
	     n/z/c/v/q may appear at most once; 0x20 flags a repeat so it
	     can be rejected below.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All of nzcvq together map to the PSR_f mask.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      /* The 'g' (GE bits) field requires the DSP extension.  */
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject repeated bits, partial nzcvq sets, and repeated 'g'.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* A-/R-profile: look the suffix up as a field name (e.g. "cxsf").  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;		/* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6283
6284 static int
6285 parse_sys_vldr_vstr (char **str)
6286 {
6287 unsigned i;
6288 int val = FAIL;
6289 struct {
6290 const char *name;
6291 int regl;
6292 int regh;
6293 } sysregs[] = {
6294 {"FPSCR", 0x1, 0x0},
6295 {"FPSCR_nzcvqc", 0x2, 0x0},
6296 {"VPR", 0x4, 0x1},
6297 {"P0", 0x5, 0x1},
6298 {"FPCXTNS", 0x6, 0x1},
6299 {"FPCXTS", 0x7, 0x1}
6300 };
6301 char *op_end = strchr (*str, ',');
6302 size_t op_strlen = op_end - *str;
6303
6304 for (i = 0; i < sizeof (sysregs) / sizeof (sysregs[0]); i++)
6305 {
6306 if (!strncmp (*str, sysregs[i].name, op_strlen))
6307 {
6308 val = sysregs[i].regl | (sysregs[i].regh << 3);
6309 *str = op_end;
6310 break;
6311 }
6312 }
6313
6314 return val;
6315 }
6316
6317 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6318 value suitable for splatting into the AIF field of the instruction. */
6319
6320 static int
6321 parse_cps_flags (char **str)
6322 {
6323 int val = 0;
6324 int saw_a_flag = 0;
6325 char *s = *str;
6326
6327 for (;;)
6328 switch (*s++)
6329 {
6330 case '\0': case ',':
6331 goto done;
6332
6333 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6334 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6335 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6336
6337 default:
6338 inst.error = _("unrecognized CPS flag");
6339 return FAIL;
6340 }
6341
6342 done:
6343 if (saw_a_flag == 0)
6344 {
6345 inst.error = _("missing CPS flags");
6346 return FAIL;
6347 }
6348
6349 *str = s - 1;
6350 return val;
6351 }
6352
6353 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6354 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6355
6356 static int
6357 parse_endian_specifier (char **str)
6358 {
6359 int little_endian;
6360 char *s = *str;
6361
6362 if (strncasecmp (s, "BE", 2))
6363 little_endian = 0;
6364 else if (strncasecmp (s, "LE", 2))
6365 little_endian = 1;
6366 else
6367 {
6368 inst.error = _("valid endian specifiers are be or le");
6369 return FAIL;
6370 }
6371
6372 if (ISALNUM (s[2]) || s[2] == '_')
6373 {
6374 inst.error = _("valid endian specifiers are be or le");
6375 return FAIL;
6376 }
6377
6378 *str = s + 2;
6379 return little_endian;
6380 }
6381
6382 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6383 value suitable for poking into the rotate field of an sxt or sxta
6384 instruction, or FAIL on error. */
6385
6386 static int
6387 parse_ror (char **str)
6388 {
6389 int rot;
6390 char *s = *str;
6391
6392 if (strncasecmp (s, "ROR", 3) == 0)
6393 s += 3;
6394 else
6395 {
6396 inst.error = _("missing rotation field after comma");
6397 return FAIL;
6398 }
6399
6400 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6401 return FAIL;
6402
6403 switch (rot)
6404 {
6405 case 0: *str = s; return 0x0;
6406 case 8: *str = s; return 0x1;
6407 case 16: *str = s; return 0x2;
6408 case 24: *str = s; return 0x3;
6409
6410 default:
6411 inst.error = _("rotation can only be 0, 8, 16, or 24");
6412 return FAIL;
6413 }
6414 }
6415
6416 /* Parse a conditional code (from conds[] below). The value returned is in the
6417 range 0 .. 14, or FAIL. */
6418 static int
6419 parse_cond (char **str)
6420 {
6421 char *q;
6422 const struct asm_cond *c;
6423 int n;
6424 /* Condition codes are always 2 characters, so matching up to
6425 3 characters is sufficient. */
6426 char cond[3];
6427
6428 q = *str;
6429 n = 0;
6430 while (ISALPHA (*q) && n < 3)
6431 {
6432 cond[n] = TOLOWER (*q);
6433 q++;
6434 n++;
6435 }
6436
6437 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6438 if (!c)
6439 {
6440 inst.error = _("condition required");
6441 return FAIL;
6442 }
6443
6444 *str = q;
6445 return c->value;
6446 }
6447
6448 /* Parse an option for a barrier instruction. Returns the encoding for the
6449 option, or FAIL. */
6450 static int
6451 parse_barrier (char **str)
6452 {
6453 char *p, *q;
6454 const struct asm_barrier_opt *o;
6455
6456 p = q = *str;
6457 while (ISALPHA (*q))
6458 q++;
6459
6460 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6461 q - p);
6462 if (!o)
6463 return FAIL;
6464
6465 if (!mark_feature_used (&o->arch))
6466 return FAIL;
6467
6468 *str = q;
6469 return o->value;
6470 }
6471
6472 /* Parse the operands of a table branch instruction. Similar to a memory
6473 operand. */
6474 static int
6475 parse_tb (char **str)
6476 {
6477 char * p = *str;
6478 int reg;
6479
6480 if (skip_past_char (&p, '[') == FAIL)
6481 {
6482 inst.error = _("'[' expected");
6483 return FAIL;
6484 }
6485
6486 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6487 {
6488 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6489 return FAIL;
6490 }
6491 inst.operands[0].reg = reg;
6492
6493 if (skip_past_comma (&p) == FAIL)
6494 {
6495 inst.error = _("',' expected");
6496 return FAIL;
6497 }
6498
6499 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6500 {
6501 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6502 return FAIL;
6503 }
6504 inst.operands[0].imm = reg;
6505
6506 if (skip_past_comma (&p) == SUCCESS)
6507 {
6508 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6509 return FAIL;
6510 if (inst.relocs[0].exp.X_add_number != 1)
6511 {
6512 inst.error = _("invalid shift");
6513 return FAIL;
6514 }
6515 inst.operands[0].shifted = 1;
6516 }
6517
6518 if (skip_past_char (&p, ']') == FAIL)
6519 {
6520 inst.error = _("']' expected");
6521 return FAIL;
6522 }
6523 *str = p;
6524 return SUCCESS;
6525 }
6526
6527 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6528 information on the types the operands can take and how they are encoded.
6529 Up to four operands may be read; this function handles setting the
6530 ".present" field for each read operand itself.
6531 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6532 else returns FAIL. */
6533
static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  /* First operand is a D-register scalar.  */
  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  /* First operand is an S/D/Q vector register.  */
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      /* Second operand is a core register.  */
      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D-register destination: a second core register must
		 follow (case 5).  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      /* Second operand is another vector register.  */
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      /* Second operand is an immediate.  */
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  /* First operand is a core register.  */
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: VMOV <Rd>, <Rn>, <Sm>, <Se> — a second
		 single-precision register must follow.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6749
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM constraint occupies the low
   16 bits of the combined value and the Thumb constraint the high
   16 bits.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
6754
6755 /* Matcher codes for parse_operands. */
6756 enum operand_parse_code
6757 {
6758 OP_stop, /* end of line */
6759
6760 OP_RR, /* ARM register */
6761 OP_RRnpc, /* ARM register, not r15 */
6762 OP_RRnpcsp, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6763 OP_RRnpcb, /* ARM register, not r15, in square brackets */
6764 OP_RRnpctw, /* ARM register, not r15 in Thumb-state or with writeback,
6765 optional trailing ! */
6766 OP_RRw, /* ARM register, not r15, optional trailing ! */
6767 OP_RCP, /* Coprocessor number */
6768 OP_RCN, /* Coprocessor register */
6769 OP_RF, /* FPA register */
6770 OP_RVS, /* VFP single precision register */
6771 OP_RVD, /* VFP double precision register (0..15) */
6772 OP_RND, /* Neon double precision register (0..31) */
6773 OP_RNDMQ, /* Neon double precision (0..31) or MVE vector register. */
6774 OP_RNDMQR, /* Neon double precision (0..31), MVE vector or ARM register.
6775 */
6776 OP_RNQ, /* Neon quad precision register */
6777 OP_RNQMQ, /* Neon quad or MVE vector register. */
6778 OP_RVSD, /* VFP single or double precision register */
6779 OP_RVSDMQ, /* VFP single, double precision or MVE vector register. */
6780 OP_RNSD, /* Neon single or double precision register */
6781 OP_RNDQ, /* Neon double or quad precision register */
6782 OP_RNDQMQ, /* Neon double, quad or MVE vector register. */
6783 OP_RNSDQ, /* Neon single, double or quad precision register */
6784 OP_RNSC, /* Neon scalar D[X] */
6785 OP_RVC, /* VFP control register */
6786 OP_RMF, /* Maverick F register */
6787 OP_RMD, /* Maverick D register */
6788 OP_RMFX, /* Maverick FX register */
6789 OP_RMDX, /* Maverick DX register */
6790 OP_RMAX, /* Maverick AX register */
6791 OP_RMDS, /* Maverick DSPSC register */
6792 OP_RIWR, /* iWMMXt wR register */
6793 OP_RIWC, /* iWMMXt wC register */
6794 OP_RIWG, /* iWMMXt wCG register */
6795 OP_RXA, /* XScale accumulator register */
6796
6797 OP_RNSDQMQ, /* Neon single, double or quad register or MVE vector register
6798 */
6799 OP_RNSDQMQR, /* Neon single, double or quad register, MVE vector register or
6800 GPR (no SP/SP) */
6801 OP_RMQ, /* MVE vector register. */
6802
6803 /* New operands for Armv8.1-M Mainline. */
6804 OP_LR, /* ARM LR register */
6805 OP_RRe, /* ARM register, only even numbered. */
6806 OP_RRo, /* ARM register, only odd numbered, not r13 or r15. */
6807 OP_RRnpcsp_I32, /* ARM register (no BadReg) or literal 1 .. 32 */
6808
6809 OP_REGLST, /* ARM register list */
6810 OP_CLRMLST, /* CLRM register list */
6811 OP_VRSLST, /* VFP single-precision register list */
6812 OP_VRDLST, /* VFP double-precision register list */
6813 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */
6814 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
6815 OP_NSTRLST, /* Neon element/structure list */
6816 OP_VRSDVLST, /* VFP single or double-precision register list and VPR */
6817 OP_MSTRLST2, /* MVE vector list with two elements. */
6818 OP_MSTRLST4, /* MVE vector list with four elements. */
6819
6820 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
6821 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
6822 OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero. */
6823 OP_RR_RNSC, /* ARM reg or Neon scalar. */
6824 OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar. */
6825 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
6826 OP_RNSDQ_RNSC_MQ, /* Vector S, D or Q reg, Neon scalar or MVE vector register.
6827 */
6828 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
6829 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
6830 OP_VMOV, /* Neon VMOV operands. */
6831 OP_RNDQ_Ibig, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6832 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
6833 OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6834 OP_VLDR, /* VLDR operand. */
6835
6836 OP_I0, /* immediate zero */
6837 OP_I7, /* immediate value 0 .. 7 */
6838 OP_I15, /* 0 .. 15 */
6839 OP_I16, /* 1 .. 16 */
6840 OP_I16z, /* 0 .. 16 */
6841 OP_I31, /* 0 .. 31 */
6842 OP_I31w, /* 0 .. 31, optional trailing ! */
6843 OP_I32, /* 1 .. 32 */
6844 OP_I32z, /* 0 .. 32 */
6845 OP_I63, /* 0 .. 63 */
6846 OP_I63s, /* -64 .. 63 */
6847 OP_I64, /* 1 .. 64 */
6848 OP_I64z, /* 0 .. 64 */
6849 OP_I255, /* 0 .. 255 */
6850
6851 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
6852 OP_I7b, /* 0 .. 7 */
6853 OP_I15b, /* 0 .. 15 */
6854 OP_I31b, /* 0 .. 31 */
6855
6856 OP_SH, /* shifter operand */
6857 OP_SHG, /* shifter operand with possible group relocation */
6858 OP_ADDR, /* Memory address expression (any mode) */
6859 OP_ADDRMVE, /* Memory address expression for MVE's VSTR/VLDR. */
6860 OP_ADDRGLDR, /* Mem addr expr (any mode) with possible LDR group reloc */
6861 OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
6862 OP_ADDRGLDC, /* Mem addr expr (any mode) with possible LDC group reloc */
6863 OP_EXP, /* arbitrary expression */
6864 OP_EXPi, /* same, with optional immediate prefix */
6865 OP_EXPr, /* same, with optional relocation suffix */
6866 OP_EXPs, /* same, with optional non-first operand relocation suffix */
6867 OP_HALF, /* 0 .. 65535 or low/high reloc. */
6868 OP_IROT1, /* VCADD rotate immediate: 90, 270. */
6869 OP_IROT2, /* VCMLA rotate immediate: 0, 90, 180, 270. */
6870
6871 OP_CPSF, /* CPS flags */
6872 OP_ENDI, /* Endianness specifier */
6873 OP_wPSR, /* CPSR/SPSR/APSR mask for msr (writing). */
6874 OP_rPSR, /* CPSR/SPSR/APSR mask for msr (reading). */
6875 OP_COND, /* conditional code */
6876 OP_TB, /* Table branch. */
6877
6878 OP_APSR_RR, /* ARM register or "APSR_nzcv". */
6879
6880 OP_RRnpc_I0, /* ARM register or literal 0 */
6881 OP_RR_EXr, /* ARM register or expression with opt. reloc stuff. */
6882 OP_RR_EXi, /* ARM register or expression with imm prefix */
6883 OP_RF_IF, /* FPA register or immediate */
6884 OP_RIWR_RIWC, /* iWMMXt R or C reg */
6885 OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
6886
6887 /* Optional operands. */
6888 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
6889 OP_oI31b, /* 0 .. 31 */
6890 OP_oI32b, /* 1 .. 32 */
6891 OP_oI32z, /* 0 .. 32 */
6892 OP_oIffffb, /* 0 .. 65535 */
6893 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
6894
6895 OP_oRR, /* ARM register */
6896 OP_oLR, /* ARM LR register */
6897 OP_oRRnpc, /* ARM register, not the PC */
6898 OP_oRRnpcsp, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6899 OP_oRRw, /* ARM register, not r15, optional trailing ! */
6900 OP_oRND, /* Optional Neon double precision register */
6901 OP_oRNQ, /* Optional Neon quad precision register */
6902 OP_oRNDQMQ, /* Optional Neon double, quad or MVE vector register. */
6903 OP_oRNDQ, /* Optional Neon double or quad precision register */
6904 OP_oRNSDQ, /* Optional single, double or quad precision vector register */
6905 OP_oRNSDQMQ, /* Optional single, double or quad register or MVE vector
6906 register. */
6907 OP_oSHll, /* LSL immediate */
6908 OP_oSHar, /* ASR immediate */
6909 OP_oSHllar, /* LSL or ASR immediate */
6910 OP_oROR, /* ROR 0/8/16/24 */
6911 OP_oBARRIER_I15, /* Option argument for a barrier instruction. */
6912
6913 /* Some pre-defined mixed (ARM/THUMB) operands. */
6914 OP_RR_npcsp = MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
6915 OP_RRnpc_npcsp = MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
6916 OP_oRRnpc_npcsp = MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),
6917
6918 OP_FIRST_OPTIONAL = OP_oI7b
6919 };
6920
/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.  */
static int
parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
{
  unsigned const int *upat = pattern;
  char *backtrack_pos = 0;
  const char *backtrack_error = 0;
  int i, val = 0, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;
  unsigned int op_parse_code;
  bfd_boolean partial_match;

  /* Helper macros, meaningful only inside this function.  They all
     use the locals STR, I and VAL, and jump to the 'failure' or
     'bad_args' labels at the bottom of the main loop on error.  */

  /* Consume the single character CHR, or fail the match.  */
#define po_char_or_fail(chr)			\
  do						\
    {						\
      if (skip_past_char (&str, chr) == FAIL)	\
	goto bad_args;				\
    }						\
  while (0)

  /* Parse a register of type REGTYPE, recording it in the current
     operand, or fail the match with a suitable error message.  */
#define po_reg_or_fail(regtype)					\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	{							\
	  first_error (_(reg_expected_msgs[regtype]));		\
	  goto failure;						\
	}							\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
				|| rtype == REG_TYPE_VFD	\
				|| rtype == REG_TYPE_NQ);	\
    }								\
  while (0)

  /* As po_reg_or_fail, but jump to LABEL (to try an alternative
     parse) instead of failing outright.  */
#define po_reg_or_goto(regtype, label)				\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	goto label;						\
								\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
				|| rtype == REG_TYPE_VFD	\
				|| rtype == REG_TYPE_NQ);	\
    }								\
  while (0)

  /* Parse an immediate in [MIN, MAX] (POPT makes the '#' prefix
     optional) or fail the match.  */
#define po_imm_or_fail(min, max, popt)				\
  do								\
    {								\
      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
	goto failure;						\
      inst.operands[i].imm = val;				\
    }								\
  while (0)

  /* Parse a Neon scalar with element size ELSZ, or jump to LABEL to
     try an alternative parse.  */
#define po_scalar_or_goto(elsz, label)					\
  do									\
    {									\
      val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
      if (val == FAIL)							\
	goto label;							\
      inst.operands[i].reg = val;					\
      inst.operands[i].isscalar = 1;					\
    }									\
  while (0)

  /* Fail the match when EXPR (a parse-routine call) reports failure.  */
#define po_misc_or_fail(expr)			\
  do						\
    {						\
      if (expr)					\
	goto failure;				\
    }						\
  while (0)

  /* As po_misc_or_fail, but a PARSE_OPERAND_FAIL_NO_BACKTRACK result
     also disables backtracking over a previously parsed optional
     operand.  */
#define po_misc_or_fail_no_backtrack(expr)		\
  do							\
    {							\
      result = expr;					\
      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
	backtrack_pos = 0;				\
      if (result != PARSE_OPERAND_SUCCESS)		\
	goto failure;					\
    }							\
  while (0)

  /* Parse a barrier option name, falling back to a plain immediate
     when the text cannot be an option name.  */
#define po_barrier_or_imm(str)				   \
  do							   \
    {							   \
      val = parse_barrier (&str);			   \
      if (val == FAIL && ! ISALPHA (*str))		   \
	goto immediate;					   \
      if (val == FAIL					   \
	  /* ISB can only take SY as an option.  */	   \
	  || ((inst.instruction & 0xf0) == 0x60		   \
	       && val != 0xf))				   \
	{						   \
	   inst.error = _("invalid barrier type");	   \
	   backtrack_pos = 0;				   \
	   goto failure;				   \
	}						   \
    }							   \
  while (0)

  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
    {
      op_parse_code = upat[i];
      /* An entry above 1<<16 packs a Thumb operand code in the high
	 half and an ARM code in the low half; select the half for the
	 current instruction set.  */
      if (op_parse_code >= 1<<16)
	op_parse_code = thumb ? (op_parse_code >> 16)
				: (op_parse_code & ((1<<16)-1));

      if (op_parse_code >= OP_FIRST_OPTIONAL)
	{
	  /* Remember where we are in case we need to backtrack.  */
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;
	}

      /* After the first operand, operands are comma-separated.  (The
	 first operand may itself have been an omitted optional one.)  */
      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

      switch (op_parse_code)
	{
	  /* Registers */
	case OP_oRRnpc:
	case OP_oRRnpcsp:
	case OP_RRnpc:
	case OP_RRnpcsp:
	case OP_oRR:
	case OP_RRe:
	case OP_RRo:
	case OP_LR:
	case OP_oLR:
	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_oRND:
	case OP_RNDMQR:
	  po_reg_or_goto (REG_TYPE_RN, try_rndmq);
	  break;
	try_rndmq:
	case OP_RNDMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_rnd);
	  break;
	try_rnd:
	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RVC:
	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
	  break;
	  /* Also accept generic coprocessor regs for unknown registers.  */
	coproc_reg:
	  po_reg_or_fail (REG_TYPE_CN);
	  break;
	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
	case OP_oRNQ:
	case OP_RNQMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_nq);
	  break;
	try_nq:
	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
	case OP_RNSD:  po_reg_or_fail (REG_TYPE_NSD);     break;
	case OP_oRNDQMQ:
	case OP_RNDQMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_rndq);
	  break;
	try_rndq:
	case OP_oRNDQ:
	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
	case OP_RVSDMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_rvsd);
	  break;
	try_rvsd:
	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
	case OP_oRNSDQ:
	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;
	case OP_RNSDQMQR:
	  po_reg_or_goto (REG_TYPE_RN, try_mq);
	  break;
	try_mq:
	case OP_oRNSDQMQ:
	case OP_RNSDQMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_nsdq2);
	  break;
	try_nsdq2:
	  po_reg_or_fail (REG_TYPE_NSDQ);
	  inst.error = 0;
	  break;
	case OP_RMQ:
	  po_reg_or_fail (REG_TYPE_MQ);
	  break;
	  /* Neon scalar.  Using an element size of 8 means that some invalid
	     scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC:  po_scalar_or_goto (8, failure);    break;

	case OP_RNDQ_I0:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
	    break;
	    try_imm0:
	    po_imm_or_fail (0, 0, TRUE);
	  }
	  break;

	case OP_RVSD_I0:
	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
	  break;

	case OP_RSVD_FI0:
	  {
	    po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
	    break;
	    try_ifimm0:
	    if (parse_ifimm_zero (&str))
	      inst.operands[i].imm = 0;
	    else
	      {
		inst.error
		  = _("only floating point zero is allowed as immediate value");
		goto failure;
	      }
	  }
	  break;

	case OP_RR_RNSC:
	  {
	    po_scalar_or_goto (8, try_rr);
	    break;
	    try_rr:
	    po_reg_or_fail (REG_TYPE_RN);
	  }
	  break;

	case OP_RNSDQ_RNSC_MQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_rnsdq_rnsc);
	  break;
	try_rnsdq_rnsc:
	case OP_RNSDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_nsdq);
	    break;
	    try_nsdq:
	    po_reg_or_fail (REG_TYPE_NSDQ);
	  }
	  break;

	case OP_RNSD_RNSC:
	  {
	    po_scalar_or_goto (8, try_s_scalar);
	    break;
	    try_s_scalar:
	    po_scalar_or_goto (4, try_nsd);
	    break;
	    try_nsd:
	    po_reg_or_fail (REG_TYPE_NSD);
	  }
	  break;

	case OP_RNDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_ndq);
	    break;
	    try_ndq:
	    po_reg_or_fail (REG_TYPE_NDQ);
	  }
	  break;

	case OP_RND_RNSC:
	  {
	    po_scalar_or_goto (8, try_vfd);
	    break;
	    try_vfd:
	    po_reg_or_fail (REG_TYPE_VFD);
	  }
	  break;

	case OP_VMOV:
	  /* WARNING: parse_neon_mov can move the operand counter, i.  If we're
	     not careful then bad things might happen.  */
	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
	  break;

	case OP_RNDQ_Ibig:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
	    break;
	    try_immbig:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
		== FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I63b:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
	    break;
	    try_shimm:
	    po_imm_or_fail (0, 63, TRUE);
	  }
	  break;

	case OP_RRnpcb:
	  po_char_or_fail ('[');
	  po_reg_or_fail  (REG_TYPE_RN);
	  po_char_or_fail (']');
	  break;

	case OP_RRnpctw:
	case OP_RRw:
	case OP_oRRw:
	  po_reg_or_fail (REG_TYPE_RN);
	  /* A trailing '!' requests base-register write-back.  */
	  if (skip_past_char (&str, '!') == SUCCESS)
	    inst.operands[i].writeback = 1;
	  break;

	  /* Immediates */
	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
	case OP_I16z:	 po_imm_or_fail (  0,	  16, FALSE);	break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
	case OP_I32z:	 po_imm_or_fail (  0,	  32, FALSE);	break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
	case OP_I63:	 po_imm_or_fail (  0,	  63, FALSE);	break;
	case OP_I64:	 po_imm_or_fail (  1,	  64, FALSE);	break;
	case OP_I64z:	 po_imm_or_fail (  0,	  64, FALSE);	break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;

	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
	case OP_oI7b:
	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
	case OP_oI31b:
	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
	case OP_oI32b:	 po_imm_or_fail (  1,	  32, TRUE);	break;
	case OP_oI32z:	 po_imm_or_fail (  0,	  32, TRUE);	break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;

	  /* Immediate variants */
	case OP_oI255c:
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, TRUE);
	  po_char_or_fail ('}');
	  break;

	case OP_I31w:
	  /* The expression parser chokes on a trailing !, so we have
	     to find it first and zap it.  */
	  {
	    char *s = str;
	    while (*s && *s != ',')
	      s++;
	    if (s[-1] == '!')
	      {
		s[-1] = '\0';
		inst.operands[i].writeback = 1;
	      }
	    po_imm_or_fail (0, 31, TRUE);
	    /* If the immediate parser consumed up to the zapped '!',
	       step over its position so parsing resumes after it.  */
	    if (str == s - 1)
	      str = s;
	  }
	  break;

	  /* Expressions */
	case OP_EXPi:	EXPi:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_OPT_PREFIX));
	  break;

	case OP_EXP:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_NO_PREFIX));
	  break;

	case OP_EXPr:	EXPr:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_NO_PREFIX));
	  /* A symbolic expression may carry an explicit relocation
	     suffix; record it in the operand when recognized.  */
	  if (inst.relocs[0].exp.X_op == O_symbol)
	    {
	      val = parse_reloc (&str);
	      if (val == -1)
		{
		  inst.error = _("unrecognized relocation suffix");
		  goto failure;
		}
	      else if (val != BFD_RELOC_UNUSED)
		{
		  inst.operands[i].imm = val;
		  inst.operands[i].hasreloc = 1;
		}
	    }
	  break;

	case OP_EXPs:
	  /* Note: uses relocs[i], not relocs[0] -- this operand code is
	     for non-first operands that may each need a relocation.  */
	  po_misc_or_fail (my_get_expression (&inst.relocs[i].exp, &str,
					      GE_NO_PREFIX));
	  if (inst.relocs[i].exp.X_op == O_symbol)
	    {
	      inst.operands[i].hasreloc = 1;
	    }
	  else if (inst.relocs[i].exp.X_op == O_constant)
	    {
	      inst.operands[i].imm = inst.relocs[i].exp.X_add_number;
	      inst.operands[i].hasreloc = 0;
	    }
	  break;

	  /* Operand for MOVW or MOVT.  */
	case OP_HALF:
	  po_misc_or_fail (parse_half (&str));
	  break;

	  /* Register or expression.  */
	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;

	  /* Register or immediate.  */
	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;

	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
	IF:
	  if (!is_immediate_prefix (*str))
	    goto bad_args;
	  str++;
	  val = parse_fpa_immediate (&str);
	  if (val == FAIL)
	    goto failure;
	  /* FPA immediates are encoded as registers 8-15.
	     parse_fpa_immediate has already applied the offset.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  break;

	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;

	  /* Two kinds of register.  */
	case OP_RIWR_RIWC:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWR
		    && rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt data or control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
	  }
	  break;

	case OP_RIWC_RIWG:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = 1;
	  }
	  break;

	  /* Misc */
	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
	case OP_oROR:	 val = parse_ror (&str);		break;
	case OP_COND:	 val = parse_cond (&str);		break;
	case OP_oBARRIER_I15:
	  po_barrier_or_imm (str); break;
	  immediate:
	  if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
	    goto failure;
	  break;

	case OP_wPSR:
	case OP_rPSR:
	  po_reg_or_goto (REG_TYPE_RNB, try_psr);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
	    {
	      inst.error = _("Banked registers are not available with this "
			     "architecture.");
	      goto failure;
	    }
	  break;
	  try_psr:
	  val = parse_psr (&str, op_parse_code == OP_wPSR);
	  break;

	case OP_VLDR:
	  po_reg_or_goto (REG_TYPE_VFSD, try_sysreg);
	  break;
	  try_sysreg:
	  val = parse_sys_vldr_vstr (&str);
	  break;

	case OP_APSR_RR:
	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
	  break;
	  try_apsr:
	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
	     instruction).  */
	  if (strncasecmp (str, "APSR_", 5) == 0)
	    {
	      /* FOUND accumulates one bit per flag letter; any repeated
		 or unknown letter forces it to 16 (invalid).  All four
		 of c, n, z and v must appear exactly once.  */
	      unsigned found = 0;
	      str += 5;
	      while (found < 15)
		switch (*str++)
		  {
		  case 'c': found = (found & 1) ? 16 : found | 1; break;
		  case 'n': found = (found & 2) ? 16 : found | 2; break;
		  case 'z': found = (found & 4) ? 16 : found | 4; break;
		  case 'v': found = (found & 8) ? 16 : found | 8; break;
		  default: found = 16;
		  }
	      if (found != 15)
		goto failure;
	      inst.operands[i].isvec = 1;
	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
	      inst.operands[i].reg = REG_PC;
	    }
	  else
	    goto failure;
	  break;

	case OP_TB:
	  po_misc_or_fail (parse_tb (&str));
	  break;

	  /* Register lists.  */
	case OP_REGLST:
	  val = parse_reg_list (&str, REGLIST_RN);
	  if (*str == '^')
	    {
	      inst.operands[i].writeback = 1;
	      str++;
	    }
	  break;

	case OP_CLRMLST:
	  val = parse_reg_list (&str, REGLIST_CLRM);
	  break;

	case OP_VRSLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S,
				    &partial_match);
	  break;

	case OP_VRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D,
				    &partial_match);
	  break;

	case OP_VRSDLST:
	  /* Allow Q registers too.  */
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D, &partial_match);
	  if (val == FAIL)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S, &partial_match);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_VRSDVLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_VFP_D_VPR, &partial_match);
	  if (val == FAIL && !partial_match)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S_VPR, &partial_match);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_NRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D, &partial_match);
	  break;

	case OP_MSTRLST4:
	case OP_MSTRLST2:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   1, &inst.operands[i].vectype);
	  /* NOTE(review): the expected constant packs the register
	     count and layout bits returned by
	     parse_neon_el_struct_list; only the plain two- or
	     four-register MVE list forms are accepted -- confirm
	     against that function's return encoding.  */
	  if (val != (((op_parse_code == OP_MSTRLST2) ? 3 : 7) << 5 | 0xe))
	    goto failure;
	  break;
	case OP_NSTRLST:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   0, &inst.operands[i].vectype);
	  break;

	  /* Addressing modes */
	case OP_ADDRMVE:
	  po_misc_or_fail (parse_address_group_reloc (&str, i, GROUP_MVE));
	  break;

	case OP_ADDR:
	  po_misc_or_fail (parse_address (&str, i));
	  break;

	case OP_ADDRGLDR:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDR));
	  break;

	case OP_ADDRGLDRS:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDRS));
	  break;

	case OP_ADDRGLDC:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDC));
	  break;

	case OP_SH:
	  po_misc_or_fail (parse_shifter_operand (&str, i));
	  break;

	case OP_SHG:
	  po_misc_or_fail_no_backtrack (
	    parse_shifter_operand_group_reloc (&str, i));
	  break;

	case OP_oSHll:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
	  break;

	case OP_oSHar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
	  break;

	case OP_oSHllar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
	  break;

	default:
	  as_fatal (_("unhandled operand code %d"), op_parse_code);
	}

      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.  */
      switch (op_parse_code)
	{
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_RRnpcb:
	case OP_RRw:
	case OP_oRRw:
	case OP_RRnpc_I0:
	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;
	  break;

	case OP_oRRnpcsp:
	case OP_RRnpcsp:
	  if (inst.operands[i].isreg)
	    {
	      if (inst.operands[i].reg == REG_PC)
		inst.error = BAD_PC;
	      else if (inst.operands[i].reg == REG_SP
		       /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
			  relaxed since ARMv8-A.  */
		       && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		{
		  gas_assert (thumb);
		  inst.error = BAD_SP;
		}
	    }
	  break;

	case OP_RRnpctw:
	  if (inst.operands[i].isreg
	      && inst.operands[i].reg == REG_PC
	      && (inst.operands[i].writeback || thumb))
	    inst.error = BAD_PC;
	  break;

	case OP_VLDR:
	  /* A register form needs no further checking; the system-reg
	     form falls through to the VAL check below.  */
	  if (inst.operands[i].isreg)
	    break;
	/* fall through.  */
	case OP_CPSF:
	case OP_ENDI:
	case OP_oROR:
	case OP_wPSR:
	case OP_rPSR:
	case OP_COND:
	case OP_oBARRIER_I15:
	case OP_REGLST:
	case OP_CLRMLST:
	case OP_VRSLST:
	case OP_VRDLST:
	case OP_VRSDLST:
	case OP_VRSDVLST:
	case OP_NRDLST:
	case OP_NSTRLST:
	case OP_MSTRLST2:
	case OP_MSTRLST4:
	  if (val == FAIL)
	    goto failure;
	  inst.operands[i].imm = val;
	  break;

	case OP_LR:
	case OP_oLR:
	  if (inst.operands[i].reg != REG_LR)
	    inst.error = _("operand must be LR register");
	  break;

	case OP_RRe:
	  if (inst.operands[i].isreg
	      && (inst.operands[i].reg & 0x00000001) != 0)
	    inst.error = BAD_ODD;
	  break;

	case OP_RRo:
	  if (inst.operands[i].isreg)
	    {
	      if ((inst.operands[i].reg & 0x00000001) != 1)
		inst.error = BAD_EVEN;
	      else if (inst.operands[i].reg == REG_SP)
		as_tsktsk (MVE_BAD_SP);
	      else if (inst.operands[i].reg == REG_PC)
		inst.error = BAD_PC;
	    }
	  break;

	default:
	  break;
	}

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;
      continue;

    bad_args:
      inst.error = BAD_ARGS;

    failure:
      if (!backtrack_pos)
	{
	  /* The parse routine should already have set inst.error, but set a
	     default here just in case.  */
	  if (!inst.error)
	    inst.error = BAD_SYNTAX;
	  return FAIL;
	}

      /* Do not backtrack over a trailing optional argument that
	 absorbed some text.  We will only fail again, with the
	 'garbage following instruction' error message, which is
	 probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
	  && upat[i+1] == OP_stop)
	{
	  if (!inst.error)
	    inst.error = BAD_SYNTAX;
	  return FAIL;
	}

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;
      backtrack_pos = 0;
    }

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;
}
7743
/* The po_* helper macros are local to parse_operands; remove them all
   from the macro namespace so later code cannot use them by accident.
   (po_scalar_or_fail is retained for historical reasons; #undef of an
   undefined name is harmless.)  */
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_fail
#undef po_scalar_or_goto
#undef po_misc_or_fail
#undef po_misc_or_fail_no_backtrack
#undef po_barrier_or_imm
7750
/* Shorthand macro for instruction encoding functions issuing errors.  */
/* Record ERR in inst.error and bail out when EXPR is true.  Note the
   bare 'return': this macro may only be used inside functions that
   return void.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7762
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.

   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
   places, while the restriction on REG_SP was relaxed since ARMv8-A.  */
/* Like 'constraint', this expands to a bare 'return' and so may only
   be used in functions returning void.  */
#define reject_bad_reg(reg)					\
  do								\
   if (reg == REG_PC)						\
     {								\
       inst.error = BAD_PC;					\
       return;							\
     }								\
   else if (reg == REG_SP					\
	    && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))	\
     {								\
       inst.error = BAD_SP;					\
       return;							\
     }								\
  while (0)
7783
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  The warning is emitted only when deprecation warnings
   are enabled (warn_on_deprecated).  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
       as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
7791
7792 /* Functions for operand encoding. ARM, then Thumb. */
7793
/* Rotate the 32-bit value V left by N bits.  Both shift counts are
   reduced mod 32, so N == 0 is safe.  Beware: V and N are each
   evaluated more than once.  */
#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7795
7796 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7797
7798 The only binary encoding difference is the Coprocessor number. Coprocessor
7799 9 is used for half-precision calculations or conversions. The format of the
7800 instruction is the same as the equivalent Coprocessor 10 instruction that
7801 exists for Single-Precision operation. */
7802
7803 static void
7804 do_scalar_fp16_v82_encode (void)
7805 {
7806 if (inst.cond < COND_ALWAYS)
7807 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7808 " the behaviour is UNPREDICTABLE"));
7809 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
7810 _(BAD_FP16));
7811
7812 inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
7813 mark_feature_used (&arm_ext_fp16);
7814 }
7815
7816 /* If VAL can be encoded in the immediate field of an ARM instruction,
7817 return the encoded form. Otherwise, return FAIL. */
7818
7819 static unsigned int
7820 encode_arm_immediate (unsigned int val)
7821 {
7822 unsigned int a, i;
7823
7824 if (val <= 0xff)
7825 return val;
7826
7827 for (i = 2; i < 32; i += 2)
7828 if ((a = rotate_left (val, i)) <= 0xff)
7829 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7830
7831 return FAIL;
7832 }
7833
7834 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7835 return the encoded form. Otherwise, return FAIL. */
7836 static unsigned int
7837 encode_thumb32_immediate (unsigned int val)
7838 {
7839 unsigned int a, i;
7840
7841 if (val <= 0xff)
7842 return val;
7843
7844 for (i = 1; i <= 24; i++)
7845 {
7846 a = val >> i;
7847 if ((val & ~(0xff << i)) == 0)
7848 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7849 }
7850
7851 a = val & 0xff;
7852 if (val == ((a << 16) | a))
7853 return 0x100 | a;
7854 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7855 return 0x300 | a;
7856
7857 a = val & 0xff00;
7858 if (val == ((a << 16) | a))
7859 return 0x200 | (a >> 8);
7860
7861 return FAIL;
7862 }
/* Encode a VFP SP or DP register number into inst.instruction.
   POS selects which operand field (d, n or m) the register goes in.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D registers above 15 require the D32 extension; record the
     feature as used, or report an error when it is unavailable.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* An S register splits into a 4-bit field (the high bits of the
     number) plus one low bit; a D register splits into a 4-bit field
     (the low bits) plus one high bit.  The field positions depend on
     the operand slot.  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
7917
7918 /* Encode a <shift> in an ARM-format instruction. The immediate,
7919 if any, is handled by md_apply_fix. */
7920 static void
7921 encode_arm_shift (int i)
7922 {
7923 /* register-shifted register. */
7924 if (inst.operands[i].immisreg)
7925 {
7926 int op_index;
7927 for (op_index = 0; op_index <= i; ++op_index)
7928 {
7929 /* Check the operand only when it's presented. In pre-UAL syntax,
7930 if the destination register is the same as the first operand, two
7931 register form of the instruction can be used. */
7932 if (inst.operands[op_index].present && inst.operands[op_index].isreg
7933 && inst.operands[op_index].reg == REG_PC)
7934 as_warn (UNPRED_REG ("r15"));
7935 }
7936
7937 if (inst.operands[i].imm == REG_PC)
7938 as_warn (UNPRED_REG ("r15"));
7939 }
7940
7941 if (inst.operands[i].shift_kind == SHIFT_RRX)
7942 inst.instruction |= SHIFT_ROR << 5;
7943 else
7944 {
7945 inst.instruction |= inst.operands[i].shift_kind << 5;
7946 if (inst.operands[i].immisreg)
7947 {
7948 inst.instruction |= SHIFT_BY_REG;
7949 inst.instruction |= inst.operands[i].imm << 8;
7950 }
7951 else
7952 inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
7953 }
7954 }
7955
/* Encode the shifter operand (operand I) of an ARM data-processing
   instruction: either a register, optionally shifted, or an immediate.  */
static void
encode_arm_shifter_operand (int i)
{
  if (inst.operands[i].isreg)
    {
      inst.instruction |= inst.operands[i].reg;
      encode_arm_shift (i);
    }
  else
    {
      inst.instruction |= INST_IMMEDIATE;
      /* If a BFD_RELOC_ARM_IMMEDIATE reloc is pending, the value is
	 inserted later by md_apply_fix; otherwise insert it now.  */
      if (inst.relocs[0].type != BFD_RELOC_ARM_IMMEDIATE)
	inst.instruction |= inst.operands[i].imm;
    }
}
7971
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register and the pre/post-index and write-back
   bits for operand I.  IS_T is true for the "T" (user-mode/translate)
   forms, which only allow post-indexed addressing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* The T forms use the W bit to select user-mode access.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the base register will be written back and is also the
     transfer register (bits 16-19 equal bits 12-15).  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
8014
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    /* RRX is ROR with a zero shift amount.  */
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.relocs[0] */
    {
      if (is_pc && !inst.relocs[0].pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
8074
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no scaled-register offset form.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.relocs[0] */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.relocs[0].pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
8118
/* Write immediate bits [7:0] to the following locations:

  |28/24|23     19|18 16|15                    4|3     0|
  |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|

  This function is used by VMOV/VMVN/VORR/VBIC.  */

static void
neon_write_immbits (unsigned immbits)
{
  inst.instruction |= immbits & 0xf;
  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
  /* Bit 7 ("a") lands in bit 28 of a Thumb encoding, bit 24 of an
     ARM encoding.  */
  inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
}
8133
/* Invert low-order SIZE bits of the 64-bit quantity XHI:XLO.  Either
   pointer may be NULL, in which case that half is left untouched.
   SIZE must be 8, 16, 32 or 64.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo_mask;

  /* Pick the mask of low-half bits affected by the inversion.  */
  switch (size)
    {
    case 8:
      lo_mask = 0xff;
      break;

    case 16:
      lo_mask = 0xffff;
      break;

    case 32:
    case 64:
      lo_mask = 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = (~*xlo) & lo_mask;

  /* Only a 64-bit inversion touches the high word.  */
  if (xhi && size == 64)
    *xhi = (~*xhi) & 0xffffffff;
}
8170
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D, i.e. every byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (byte * 8)) & 0xff;

      if (field != 0 && field != 0xff)
	return 0;
    }

  return 1;
}
8182
/* For immediate of above form, return 0bABCD: bit 0 of each byte of
   IMM, gathered into a 4-bit value (byte N supplies result bit N).  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1) << byte;

  return result;
}
8191
/* Compress quarter-float representation to 0b...000 abcdefgh: the
   sign bit of the IEEE single IMM becomes bit 7, bits 25-19 (low
   exponent bits plus top mantissa bits) become bits 6-0.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low7 = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return sign | low7;
}
8199
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  Returns FAIL when the constant cannot be
   encoded as a modified immediate of element size SIZE or smaller.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-float form: cmode 0xf, only valid at size 32 with OP == 0.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* 64-bit pattern where each byte is 0x00 or 0xff: cmode 0xe, OP 1.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit constant is only encodable if both halves
	 are equal, in which case fall through to the 32-bit cases.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* 32-bit element, single non-zero byte: cmodes 0x0/0x2/0x4/0x6.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* "Ones" variants (low bytes all-ones): cmodes 0xc/0xd.  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Try a 16-bit element only if the value is a repeated halfword.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* 16-bit element, single non-zero byte: cmodes 0x8/0xa.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Try an 8-bit element only if the value is a repeated byte.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
8309
8310 #if defined BFD_HOST_64_BIT
/* Returns TRUE if double precision value V may be cast
   to single precision without loss of accuracy.  V holds the raw
   IEEE binary64 bit pattern.  */

static bfd_boolean
is_double_a_single (bfd_int64_t v)
{
  /* Exponent is bits 62-52, mantissa the low 52 bits.  */
  int exp = (int)((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  /* Exponent must be zero, all-ones (inf/NaN), or within the single
     precision range; the 29 low mantissa bits that binary32 cannot
     represent must be zero.  */
  return (exp == 0 || exp == 0x7FF
	  || (exp >= 1023 - 126 && exp <= 1023 + 127))
    && (mantissa & 0x1FFFFFFFl) == 0;
}
8324
/* Returns a double precision value casted to single precision
   (ignoring the least significant bits in exponent and mantissa).
   V is the raw binary64 bit pattern; the result is the corresponding
   binary32 bit pattern.  */

static int
double_to_single (bfd_int64_t v)
{
  int sign = (int) ((v >> 63) & 1l);
  int exp = (int) ((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  if (exp == 0x7FF)
    /* Inf/NaN: map the all-ones 11-bit exponent to the all-ones
       8-bit one.  */
    exp = 0xFF;
  else
    {
      /* Rebias from the binary64 bias (1023) to the binary32 bias (127).  */
      exp = exp - 1023 + 127;
      if (exp >= 0xFF)
	{
	  /* Infinity.  */
	  exp = 0x7F;
	  mantissa = 0;
	}
      else if (exp < 0)
	{
	  /* No denormalized numbers.  */
	  exp = 0;
	  mantissa = 0;
	}
    }
  /* Keep the top 23 of the 52 mantissa bits.  */
  mantissa >>= 29;
  return (sign << 31) | (exp << 23) | mantissa;
}
8356 #endif /* BFD_HOST_64_BIT */
8357
/* Kind of destination for an "=expr" literal load; selects which
   move-immediate forms move_or_literal_pool may substitute.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb core register.  */
  CONST_ARM,	/* ARM core register.  */
  CONST_VEC	/* Vector/VFP register.  */
};
8364
8365 static void do_vfp_nsyn_opcode (const char *);
8366
/* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p   = (t == CONST_ARM);

  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* "=expr" is only meaningful on a load.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op != O_constant
      && inst.relocs[0].exp.X_op != O_symbol
      && inst.relocs[0].exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op == O_constant
      || inst.relocs[0].exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.relocs[0].exp.X_op == O_big)
	{
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.relocs[0].exp.X_add_number == -1)
	    {
	      /* X_add_number == -1 marks a flonum; convert it to
		 littlenums first.  */
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

	  /* Reassemble the (up to 64-bit) value from 16-bit littlenums,
	     least significant first.  */
#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	    | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.relocs[0].exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* LDR should not use lead in a flag-setting instruction being
		 chosen so we do not check whether movs can be used.  */

	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		   || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		  && inst.operands[i].reg != 13
		  && inst.operands[i].reg != 15)
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      /* Try the bitwise complement for MVN.  */
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000  /*  MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      /* Try a Neon VMOV/VMVN modified immediate.  */
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      unsigned immhi = inst.operands[1].regisimm
		? inst.operands[1].reg
		: inst.relocs[0].exp.X_unsigned
		? 0
		: ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  /* Retry with the inverted value (VMVN form).  */
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
		    | (1 << 23)
		    | (cmode << 8)
		    | (op << 5)
		    | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move form applied: fall back to a PC-relative literal-pool load.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.relocs[0].pc_rel = 1;
  inst.relocs[0].type = (thumb_p
			 ? BFD_RELOC_ARM_THUMB_OFFSET
			 : (mode_3
			    ? BFD_RELOC_ARM_HWLITERAL
			    : BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8611
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  Returns SUCCESS or FAIL.  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      /* "=const" on a vector destination: try a move or literal pool.  */
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.relocs[0].type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.relocs[0].type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.relocs[0].type > BFD_RELOC_ARM_LDC_SB_G2)
	    && inst.relocs[0].type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      /* Not a group relocation: use the default coprocessor offset
	 relocation for the current instruction set.  */
      if (thumb_mode)
	inst.relocs[0].type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.relocs[0].type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8688
8689 /* Functions for instruction encoding, sorted by sub-architecture.
8690 First some generics; their names are taken from the conventional
8691 bit positions for register arguments in ARM format instructions. */
8692
/* Encoder for instructions taking no operands.  */
static void
do_noargs (void)
{
}
8697
/* Encode Rd in bits 12-15.  */
static void
do_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
}
8703
/* Encode Rn in bits 16-19.  */
static void
do_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
}
8709
/* Encode Rd in bits 12-15 and Rm in bits 0-3.  */
static void
do_rd_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
}
8716
/* Encode Rm in bits 0-3 and Rn in bits 16-19.  */
static void
do_rm_rn (void)
{
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 16;
}
8723
/* Encode Rd in bits 12-15 and Rn in bits 16-19.  */
static void
do_rd_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}
8730
/* Encode first operand in bits 16-19 and second in bits 12-15.  */
static void
do_rn_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
}
8737
/* Encode first operand in bits 8-11 and second in bits 16-19.  */
static void
do_tt (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
}
8744
/* Diagnose use of a construct that is obsolete on CPUs with FEATURE:
   a warning when assembling for "any" CPU (we cannot be certain), an
   error when the selected CPU definitely has FEATURE.  MSG is the
   diagnostic text.  Returns TRUE iff a diagnostic was issued.  */
static bfd_boolean
check_obsolete (const arm_feature_set *feature, const char *msg)
{
  if (ARM_CPU_IS_ANY (cpu_variant))
    {
      as_tsktsk ("%s", msg);
      return TRUE;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
    {
      as_bad ("%s", msg);
      return TRUE;
    }

  return FALSE;
}
8761
/* Encode Rd (bits 12-15), Rm (bits 0-3) and Rn (bits 16-19),
   enforcing the extra operand restrictions that apply to SWP{B}.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8785
/* Encode Rd (bits 12-15), Rn (bits 16-19) and Rm (bits 0-3).  */
static void
do_rd_rn_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
}
8793
/* Encode Rm (bits 0-3), Rd (bits 12-15) and Rn (bits 16-19).  The
   third operand is an address which must be a plain [Rn] with no
   offset; Rn may not be PC.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* Any non-zero or unresolved offset expression is rejected.  */
  constraint (((inst.relocs[0].exp.X_op != O_constant
		&& inst.relocs[0].exp.X_op != O_illegal)
	       || inst.relocs[0].exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8806
/* Encode an immediate operand into the low bits of the instruction.  */
static void
do_imm0 (void)
{
  inst.instruction |= inst.operands[0].imm;
}
8812
/* Encode Rd in bits 12-15 plus a coprocessor address (operand 1);
   writeback and unindexed forms are both permitted.  */
static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
8819
8820 /* ARM instructions, in alphabetical order by function name (except
8821 that wrapper functions appear immediately after the function they
8822 wrap). */
8823
/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */

static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* Compensate for the ARM-state PC read-ahead of 8 bytes.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* For interworking, set bit 0 when taking the address of a defined
     Thumb function so the result is a valid BX/BLX target.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
8845
/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
   add rd, pc, #low(label-.-8)"
   add rd, rd, #high(label-.-8)"  */

static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type	       = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.relocs[0].pc_rel	       = 1;
  /* The pseudo-op expands to two ARM instructions.  */
  inst.size		       = INSN_SIZE * 2;
  /* Compensate for the ARM-state PC read-ahead of 8 bytes.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* For interworking, set bit 0 when taking the address of a defined
     Thumb function so the result is a valid BX/BLX target.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
8870
/* Encode a data-processing arithmetic instruction: Rd in bits 12-15,
   Rn in bits 16-19 and a shifter operand.  In two-operand (pre-UAL)
   form the destination doubles as the first source.  */
static void
do_arit (void)
{
  /* Thumb-only group relocations cannot be used on ARM arithmetic.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
8883
/* Encode a barrier instruction (DMB/DSB/ISB-style): use the parsed
   option value when given, otherwise the default option 0xf (SY).  */
static void
do_barrier (void)
{
  if (inst.operands[0].present)
    inst.instruction |= inst.operands[0].imm;
  else
    inst.instruction |= 0xf;
}
8892
/* Encode BFC: Rd in bits 12-15, LSB in bits 7-11, MSB in bits 16-20.
   Operands are Rd, #lsb, #width.  */
static void
do_bfc (void)
{
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
8904
/* Encode BFI: Rd in bits 12-15, Rm in bits 0-3, LSB in bits 7-11,
   MSB in bits 16-20.  Operands are Rd, Rm, #lsb, #width.  */
static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
8924
/* Encode SBFX/UBFX: Rd in bits 12-15, Rm in bits 0-3, LSB in bits
   7-11 and (width - 1) in bits 16-20.  Operands are Rd, Rm, #lsb,
   #width.  */
static void
do_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
}
8935
/* ARM V5 breakpoint instruction (argument parse)
     BKPT <16 bit unsigned immediate>
   Instruction is not conditional.
	The bit pattern given in insns[] has the COND_ALWAYS condition,
	and it is an error if the caller tried to override that.
   The 16-bit immediate is split across bits 19:8 and 3:0.  */

static void
do_bkpt (void)
{
  /* Top 12 of 16 bits to bits 19:8.  */
  inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;

  /* Bottom 4 of 16 bits to bits 3:0.  */
  inst.instruction |= inst.operands[0].imm & 0xf;
}
8951
/* Set up the PC-relative relocation for a branch.  DEFAULT_RELOC is
   used unless the operand carried an explicit "(plt)" or "(tlscall)"
   suffix, which selects the corresponding special relocation.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.relocs[0].type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.relocs[0].type = (bfd_reloc_code_real_type) default_reloc;
  inst.relocs[0].pc_rel = 1;
}
8968
/* Encode B{cond}: for EABI v4+ objects use the JUMP relocation so the
   linker may veneer it, otherwise the plain branch relocation.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8979
/* Encode BL{cond}: for EABI v4+ objects an unconditional call uses
   the CALL relocation (allowing BL<->BLX interworking fixups); a
   conditional one must use the JUMP relocation instead.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8995
/* ARM V5 branch-link-exchange instruction (argument parse)
     BLX <target_addr>	ie BLX(1)
     BLX{<condition>} <Rm>	ie BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */

static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
9027
/* Encode BX Rm, deciding whether an R_ARM_V4BX marker relocation is
   needed (EABI v4+ objects targeting ARMv4t or earlier, so the linker
   can rewrite the instruction).  */
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (!ARM_FEATURE_ZERO (selected_object_arch)
      && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
      want_reloc = TRUE;

#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.relocs[0].type = BFD_RELOC_ARM_V4BX;
}
9052
9053
/* ARM v5TEJ.  Jump to Jazelle code.  Rm is encoded in bits 0-3.  */

static void
do_bxj (void)
{
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg;
}
9064
/* Co-processor data operation:
   CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
   CDP2	     <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
   Operands are inserted straight into their encoding fields.  */
static void
do_cdp (void)
{
  inst.instruction |= inst.operands[0].reg << 8;	/* coproc */
  inst.instruction |= inst.operands[1].imm << 20;	/* opcode_1 */
  inst.instruction |= inst.operands[2].reg << 12;	/* CRd */
  inst.instruction |= inst.operands[3].reg << 16;	/* CRn */
  inst.instruction |= inst.operands[4].reg;		/* CRm */
  inst.instruction |= inst.operands[5].imm << 5;	/* opcode_2 */
}
9078
/* Encode comparison-style data-processing instructions (CMP/CMN/TST/TEQ
   shape): Rn in bits 16-19, operand 1 via the shifter-operand field.  */
static void
do_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);
}
9085
/* Transfer between coprocessor and ARM registers.
   MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
   MRC2
   MCR{cond}
   MCR2

   No special properties.  */

/* One coprocessor register whose access is deprecated and/or obsoleted
   from a given architecture version onwards.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* Primary opcode.  */
  unsigned crn;			/* First coprocessor register.  */
  unsigned crm;			/* Second coprocessor register.  */
  int opc2;			/* Secondary opcode.  */
  arm_feature_set deprecated;	/* Architectures where access is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where access is obsoleted.  */
  const char *dep_msg;		/* Diagnostic for deprecated access.  */
  const char *obs_msg;		/* Diagnostic for obsoleted access.  */
};
9106
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  Consulted by
   do_co_reg to warn about MCR/MRC accesses that ARMv8 deprecates.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in the table above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
9134
/* Encode MCR/MCR2/MRC/MRC2.  Validates Rd against per-mode register
   restrictions, warns about ARMv8-deprecated coprocessor registers,
   then inserts all operands into their fields.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn if this access names a register that ARMv8 deprecates
     (unless assembling for "any" CPU or warnings are off).  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  inst.instruction |= inst.operands[0].reg << 8;	/* coproc */
  inst.instruction |= inst.operands[1].imm << 21;	/* opc1 */
  inst.instruction |= Rd << 12;				/* Rd/Rt */
  inst.instruction |= inst.operands[3].reg << 16;	/* CRn */
  inst.instruction |= inst.operands[4].reg;		/* CRm */
  inst.instruction |= inst.operands[5].imm << 5;	/* opc2 */
}
9184
/* Transfer between coprocessor register and pair of ARM registers.
   MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
   MCRR2
   MRRC{cond}
   MRRC2

   Two XScale instructions are special cases of these:

   MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
   MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0

   Result unpredictable if Rd or Rn is R15.  */

static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
       /* If Rd == Rn, error that the operation is
	  unpredictable (example MRRC p3,#1,r1,r1,c4).  */
       constraint (Rd == Rn, BAD_OVERLAP);
    }

  inst.instruction |= inst.operands[0].reg << 8;	/* coproc */
  inst.instruction |= inst.operands[1].imm << 4;	/* opcode */
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;		/* CRm */
}
9231
/* Encode CPS (change processor state): iflags in bits 6-8; if a mode
   operand is present, set the M-bit and insert the mode number.  */
static void
do_cpsi (void)
{
  inst.instruction |= inst.operands[0].imm << 6;
  if (inst.operands[1].present)
    {
      inst.instruction |= CPSI_MMOD;
      inst.instruction |= inst.operands[1].imm;
    }
}
9242
/* Encode DBG #<option>: the 4-bit option goes in the low bits.  */
static void
do_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
9248
9249 static void
9250 do_div (void)
9251 {
9252 unsigned Rd, Rn, Rm;
9253
9254 Rd = inst.operands[0].reg;
9255 Rn = (inst.operands[1].present
9256 ? inst.operands[1].reg : Rd);
9257 Rm = inst.operands[2].reg;
9258
9259 constraint ((Rd == REG_PC), BAD_PC);
9260 constraint ((Rn == REG_PC), BAD_PC);
9261 constraint ((Rm == REG_PC), BAD_PC);
9262
9263 inst.instruction |= Rd << 16;
9264 inst.instruction |= Rn << 0;
9265 inst.instruction |= Rm << 8;
9266 }
9267
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emit no bytes for this pseudo-instruction.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_pred_insn_type (IT_INSN);
      /* Low nibble is the then/else mask; bit 4 marks the block start.  */
      now_pred.mask = (inst.instruction & 0xf) | 0x10;
      now_pred.cc = inst.operands[0].imm;
    }
}
9284
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.

   RANGE is a 16-bit register mask.  An empty mask also yields -1;
   previously ffs(0)-1 produced a shift by -1, which is undefined
   behaviour in C.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* i < 0: empty list; i > 15: lowest set bit is not a core register;
     otherwise require that no higher bit is set.  */
  if (i < 0 || i > 15)
    return -1;
  return (range == (1 << i)) ? i : -1;
}
9293
/* Encode LDM/STM (also used for PUSH/POP via FROM_PUSH_POP_MNEM).
   Inserts base register and register list, handles the '!'/'^'
   suffixes, diagnoses unpredictable writeback combinations, and
   switches single-register PUSH/POP to the A2 (STR/LDR) encoding.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* '^' on the register list selects the type-2/3 (user-bank) form.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      if (is_push && one_reg == 13 /* SP */)
	/* PR 22483: The A2 encoding cannot be used when
	   pushing the stack pointer as this is UNPREDICTABLE.  */
	return;

      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
9349
/* Plain LDM/STM mnemonics: no PUSH/POP single-register rewriting.  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
9355
/* ARMv5TE load-consecutive (argument parse)
   Mode is like LDRH.

   LDRccD R, mode
   STRccD R, mode.

   The first transfer register must be even and not r14; the optional
   second register, if given, must be the next one up.  */

static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
9397
/* Encode LDREX Rt, [Rn].  The address operand must be a simple
   pre-indexed register with a zero (or absent) offset.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* NOTE(review): this PC check duplicates the reg == REG_PC clause of
     the first constraint above, so it can never fire.  */
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
9429
/* Encode LDREXD Rt, Rt2, [Rn]: Rt must be even, Rt2 (if given) must be
   Rt + 1; operand 2 is the base register.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9445
/* In both ARM and thumb state 'ldr pc, #imm' with an immediate
   which is not a multiple of four is UNPREDICTABLE.  Reject it when
   both the destination and the base register are the PC and the
   literal offset is misaligned.  */
static void
check_ldr_r15_aligned (void)
{
  constraint (!(inst.operands[1].immisreg)
	      && (inst.operands[0].reg == REG_PC
	      && inst.operands[1].reg == REG_PC
	      && (inst.relocs[0].exp.X_add_number & 0x3)),
	      _("ldr to register 15 must be 4-byte aligned"));
}
9457
/* Encode LDR/STR (word/byte).  A non-register second operand is a
   literal: try to turn it into a MOV or a literal-pool load first.  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
9468
/* Encode LDRT/STRT (user-mode translation, addr-mode 2).  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
9487
/* Halfword and signed-byte load/store operations (addr-mode 3).
   The PC is not a valid transfer register here.  */

static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
9500
/* Encode LDRHT/LDRSBT/LDRSHT/STRHT (user-mode translation, addr-mode 3).
   Same [Rn] -> [Rn]! rewriting as do_ldstt.  */
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9519
/* Co-processor register load/store.
   Format: <LDC|STC>{cond}[L] CP#,CRd,<address>  */
static void
do_lstc (void)
{
  inst.instruction |= inst.operands[0].reg << 8;	/* coproc */
  inst.instruction |= inst.operands[1].reg << 12;	/* CRd */
  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9529
/* Encode MLA/MLS: Rd in bits 16-19, Rm low, Rs in bits 8-11, Rn (the
   accumulator) in bits 12-15.  */
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
9544
/* Encode MOV/MVN: Rd in bits 12-15, operand 1 via the shifter-operand
   field.  Thumb-only :lower0_7:-style relocs are rejected here.  */
static void
do_mov (void)
{
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9554
/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.
   :lower16:/:upper16: must match the W/T variant; a resolved constant
   is split across the imm12 and imm4 fields.  */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 22 distinguishes MOVT from MOVW.  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.relocs[0].type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.relocs[0].type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      imm = inst.relocs[0].exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
9576
/* Handle MRS written in non-unified (VFP) syntax.  Returns SUCCESS if
   the operands selected a VFP form (re-dispatched to fmstat/fmrx),
   FAIL to let the caller encode a core MRS instead.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* Re-encode as FMSTAT, which takes no operands.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
9595
9596 static int
9597 do_vfp_nsyn_msr (void)
9598 {
9599 if (inst.operands[0].isvec)
9600 do_vfp_nsyn_opcode ("fmxr");
9601 else
9602 return FAIL;
9603
9604 return SUCCESS;
9605 }
9606
/* Encode VMRS Rt, <spec_reg>: system register number in bits 16-19,
   Rt in bits 12-15.  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* MVFR2 is only valid at ARMv8-A.  */
  if (inst.operands[1].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
9635
/* Encode VMSR <spec_reg>, Rt: system register number in bits 16-19,
   Rt in bits 12-15.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* MVFR2 is only valid for ARMv8-A.  */
  if (inst.operands[0].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
9659
/* Encode MRS.  Tries the VFP non-unified syntax first; otherwise
   inserts Rd and either a banked-register specifier or the
   CPSR/SPSR selector.  */
static void
do_mrs (void)
{
  unsigned br;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      br = inst.operands[1].reg;
      /* Presumably validates the encoding of a banked-register
	 specifier (R bit / SYSm fields) — see the psr parsing code.  */
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9688
/* Two possible forms:
      "{C|S}PSR_<field>, Rm",
      "{C|S}PSR_f, #expression".
   The immediate form defers encoding of the value to a reloc.  */

static void
do_msr (void)
{
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  inst.instruction |= inst.operands[0].imm;	/* psr field mask */
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      inst.instruction |= INST_IMMEDIATE;
      inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
      inst.relocs[0].pc_rel = 0;
    }
}
9709
/* Encode MUL: Rd in bits 16-19, Rm low, Rs in bits 8-11.  A missing
   third operand defaults to Rd (two-operand form).  */
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Rd == Rm is UNPREDICTABLE before ARMv6.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
9725
/* Long Multiply Parser
   UMULL RdLo, RdHi, Rm, Rs
   SMULL RdLo, RdHi, Rm, Rs
   UMLAL RdLo, RdHi, Rm, Rs
   SMLAL RdLo, RdHi, Rm, Rs.  */

static void
do_mull (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* RdLo */
  inst.instruction |= inst.operands[1].reg << 16;	/* RdHi */
  inst.instruction |= inst.operands[2].reg;		/* Rm */
  inst.instruction |= inst.operands[3].reg << 8;	/* Rs */

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
9750
/* Encode NOP.  With an operand, or on v6K and later, emit the
   architectural hint form; otherwise leave the insns[] opcode as-is.  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* keep only the condition field */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
9764
/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
   Condition defaults to COND_ALWAYS.
   Error if Rd, Rn or Rm are R15.  */

static void
do_pkhbt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9779
/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  Note Rm and Rn swap fields here.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
9802
/* ARMv5TE: Preload-Cache
   MP Extensions: Preload for write

   PLD(W) <addr_mode>

   Syntactically, like LDR with B=1, W=0, L=1.
   Only plain pre-indexed addressing is accepted.  */

static void
do_pld (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9823
/* ARMv7: PLI <addr_mode>
   Like do_pld, but the P bit is cleared in the final encoding.  */
static void
do_pli (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  inst.instruction &= ~PRE_INDEX;
}
9839
/* Encode PUSH/POP by rewriting the operands into the equivalent
   LDM/STM form with SP! as the base register.  */
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Shift the register list to operand 1 and synthesize SP! as operand 0.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9852
/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
   word at the specified address and the following word
   respectively.
   Unconditionally executed.
   Error if Rn is R15.  */

static void
do_rfe (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
9866
/* ARM V6 ssat (argument parse).
   The saturate position is encoded as imm - 1 (SSAT range is 1..32).  */

static void
do_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9879
/* ARM V6 usat (argument parse).
   Unlike SSAT, the saturate position is encoded directly (range 0..31).  */

static void
do_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9892
/* ARM V6 ssat16 (argument parse).
   Saturate position encoded as imm - 1, like SSAT.  */

static void
do_ssat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;
}
9902
/* ARM V6 usat16 (argument parse).  Saturate position encoded directly.  */
static void
do_usat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;
}
9910
/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   preserving the other bits.

   setend <endian_specifier>, where <endian_specifier> is either
   BE or LE.  */

static void
do_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* Nonzero imm means BE: set the instruction's E bit.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9927
/* Encode shift pseudo-instructions (LSL/LSR/ASR/ROR Rd, {Rm,} Rs|#imm)
   as MOV with a shifter operand.  A missing Rm defaults to Rd.  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate shift amount is fixed up via its own reloc.  */
    inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
}
9948
/* Encode SMC #<imm>: the immediate is applied via its own reloc.  */
static void
do_smc (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SMC;
  inst.relocs[0].pc_rel = 0;
}
9955
/* Encode HVC #<imm>: the immediate is applied via its own reloc.  */
static void
do_hvc (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_HVC;
  inst.relocs[0].pc_rel = 0;
}
9962
/* Encode SWI/SVC #<imm>: the immediate is applied via its own reloc.  */
static void
do_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
  inst.relocs[0].pc_rel = 0;
}
9969
/* Encode SETPAN #<imm> (ARM encoding): PAN value in bit 9.  */
static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}
9978
/* Encode SETPAN #<imm> (Thumb encoding): PAN value in bit 3.  */
static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
9987
9988 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9989 SMLAxy{cond} Rd,Rm,Rs,Rn
9990 SMLAWy{cond} Rd,Rm,Rs,Rn
9991 Error if any register is R15. */
9992
9993 static void
9994 do_smla (void)
9995 {
9996 inst.instruction |= inst.operands[0].reg << 16;
9997 inst.instruction |= inst.operands[1].reg;
9998 inst.instruction |= inst.operands[2].reg << 8;
9999 inst.instruction |= inst.operands[3].reg << 12;
10000 }
10001
/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
   Error if any register is R15.
   Warning if Rdlo == Rdhi.  */

static void
do_smlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* RdLo */
  inst.instruction |= inst.operands[1].reg << 16;	/* RdHi */
  inst.instruction |= inst.operands[2].reg;		/* Rm */
  inst.instruction |= inst.operands[3].reg << 8;	/* Rs */

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
10018
/* ARM V5E (El Segundo) signed-multiply (argument parse)
   SMULxy{cond} Rd,Rm,Rs
   Error if any register is R15.  */

static void
do_smul (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
}
10030
/* ARM V6 srs (argument parse).  The variable fields in the encoding are
   the same for both ARM and Thumb-2.  The base register defaults to SP
   and may only be SP.  */

static void
do_srs (void)
{
  int reg;

  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;	/* mode number */
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
10052
/* ARM V6 strex (argument parse).
   STREX Rd, Rm, [Rn]: plain pre-indexed address with zero offset
   required; Rd must not overlap Rm or Rn.  */

static void
do_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd */
  inst.instruction |= inst.operands[1].reg;		/* Rm */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rn */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
10078
/* Thumb STREXB/STREXH: validate the addressing mode and overlap rules,
   then use the common Rm/Rd/Rn field encoder.  */
static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
10093
/* Encode STREXD Rd, Rt, Rt2, [Rn]: Rt must be even, Rt2 (if given)
   must be Rt + 1; Rd must not overlap the source pair or the base.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd */
  inst.instruction |= inst.operands[1].reg;		/* Rt */
  inst.instruction |= inst.operands[3].reg << 16;	/* Rn */
}
10115
/* ARM V8 STLEX (ARM encoding): status register must not overlap the
   source or base register; fields via the common Rd/Rm/Rn encoder.  */
static void
do_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
10125
10126 static void
10127 do_t_stlex (void)
10128 {
10129 constraint (inst.operands[0].reg == inst.operands[1].reg
10130 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
10131
10132 do_rm_rd_rn ();
10133 }
10134
10135 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
10136 extends it to 32-bits, and adds the result to a value in another
10137 register. You can specify a rotation by 0, 8, 16, or 24 bits
10138 before extracting the 16-bit value.
10139 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
10140 Condition defaults to COND_ALWAYS.
10141 Error if any register uses R15. */
10142
10143 static void
10144 do_sxtah (void)
10145 {
10146 inst.instruction |= inst.operands[0].reg << 12;
10147 inst.instruction |= inst.operands[1].reg << 16;
10148 inst.instruction |= inst.operands[2].reg;
10149 inst.instruction |= inst.operands[3].imm << 10;
10150 }
10151
10152 /* ARM V6 SXTH.
10153
10154 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
10155 Condition defaults to COND_ALWAYS.
10156 Error if any register uses R15. */
10157
10158 static void
10159 do_sxth (void)
10160 {
10161 inst.instruction |= inst.operands[0].reg << 12;
10162 inst.instruction |= inst.operands[1].reg;
10163 inst.instruction |= inst.operands[2].imm << 10;
10164 }
10165 \f
/* VFP instructions.  In a logical order: SP variant first, monad
   before dyad, arithmetic then move then load/store.  */

/* Single-precision unary operation: Sd, Sm.  */

static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Single-precision binary operation: Sd, Sn, Sm.  */

static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* Compare against zero: only Sd is encoded.  */

static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}

/* Conversion with a double destination and single source: Dd, Sm.  */

static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Conversion with a single destination and double source: Sd, Dm.  */

static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
10203
/* Move a VFP single into a core register: Rd in bits 12-15, Sn.  */

static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}

/* Move a consecutive pair of VFP singles into two core registers;
   the SP operand must name exactly two registers.  */

static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* Move a core register into a VFP single: Sn, Rd in bits 12-15.  */

static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}

/* Move two core registers into a consecutive pair of VFP singles.  */

static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
10237
/* Single-precision load/store: Sd, [address in operand 1].  */

static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}

/* Double-precision load/store: Dd, [address in operand 1].  */

static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
10251
10252
/* Common encoder for single-precision load/store multiple.  Operand 0
   is the base register (with optional writeback); operand 1 carries
   the first transfer register in .reg and the register count in .imm.  */

static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    /* Without writeback only the increment-after form is valid.  */
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}
10265
10266 static void
10267 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
10268 {
10269 int count;
10270
10271 if (inst.operands[0].writeback)
10272 inst.instruction |= WRITE_BACK;
10273 else
10274 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
10275 _("this addressing mode requires base-register writeback"));
10276
10277 inst.instruction |= inst.operands[0].reg << 16;
10278 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
10279
10280 count = inst.operands[1].imm << 1;
10281 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
10282 count += 1;
10283
10284 inst.instruction |= count;
10285 }
10286
/* Load/store multiple, single precision, increment-after.  */

static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

/* Load/store multiple, single precision, decrement-before.  */

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

/* Load/store multiple, double precision, increment-after.  */

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

/* Load/store multiple, double precision, decrement-before.  */

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

/* Extended (X) forms: the double encoder adds an extra transfer word.  */

static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
10322
/* Double-precision two-register operation: Dd, Dm.  */

static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

/* Two-register operation with operands in Dn, Dd order.  */

static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

/* Two-register operation: Dd, Dn.  */

static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

/* Three-register operation: Dd, Dn, Dm.  */

static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

/* Single-register operation: only Dd is encoded.  */

static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

/* Three-register operation with operands in Dm, Dd, Dn order.  */

static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
10365
/* VFPv3 instructions.  */

/* Immediate move, single precision: the 8-bit immediate is split,
   high nibble into bits 16-19 and low nibble into bits 0-3.  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

/* Immediate move, double precision; same immediate split.  */
static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
10382
10383 static void
10384 vfp_conv (int srcsize)
10385 {
10386 int immbits = srcsize - inst.operands[1].imm;
10387
10388 if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
10389 {
10390 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
10391 i.e. immbits must be in range 0 - 16. */
10392 inst.error = _("immediate value out of range, expected range [0, 16]");
10393 return;
10394 }
10395 else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
10396 {
10397 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
10398 i.e. immbits must be in range 0 - 31. */
10399 inst.error = _("immediate value out of range, expected range [1, 32]");
10400 return;
10401 }
10402
10403 inst.instruction |= (immbits & 1) << 5;
10404 inst.instruction |= (immbits >> 1);
10405 }
10406
/* Fixed-point conversion, single-precision register, 16-bit value.  */

static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

/* Fixed-point conversion, double-precision register, 16-bit value.  */

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

/* Fixed-point conversion, single-precision register, 32-bit value.  */

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

/* Fixed-point conversion, double-precision register, 32-bit value.  */

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
10434 \f
/* FPA instructions.  Also in a logical order.  */

/* FPA compare: operand 0 in bits 16-19, operand 1 in bits 0-3.  */

static void
do_fpa_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
10443
/* FPA multi-register load/store.  Operand 0 is the first FPA
   register, operand 1 the register count (1-4, with 4 encoded as
   zero in the CP_T_X/CP_T_Y bits), operand 2 the address.  */

static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each transferred register occupies 12 bytes.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.relocs[0].exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.relocs[0].exp.X_add_number = -inst.relocs[0].exp.X_add_number;

      /* Emulate post-decrement addressing by rewriting the operand.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
10482 \f
/* iWMMXt instructions: strictly in alphabetical order.  */

/* The sole register operand must be r15 (the condition flags).  */

static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
10490
/* TEXTRC: Rd in bits 12-15, lane immediate in the low bits.  */

static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}

/* TEXTRM: Rd in bits 12-15, wRn in bits 16-19, lane immediate in
   the low bits.  */

static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}

/* TINSR: wRd in bits 16-19, Rn in bits 12-15, lane immediate in
   the low bits.  */

static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}

/* TMIA: accumulator at bit 5, Rm in bits 0-3, Rs in bits 12-15.  */

static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* WALIGNI: wRd, wRn, wRm plus an alignment immediate at bit 20.  */

static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}

/* WMERGE: wRd, wRn, wRm plus an immediate at bit 21.  */

static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}

static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
10548
10549 static void
10550 do_iwmmxt_wldstbh (void)
10551 {
10552 int reloc;
10553 inst.instruction |= inst.operands[0].reg << 12;
10554 if (thumb_mode)
10555 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
10556 else
10557 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
10558 encode_arm_cp_address (1, TRUE, FALSE, reloc);
10559 }
10560
/* iWMMXt word load/store, which may target either a wR register or
   a control register.  */

static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* The control-register form is unconditional (0xf condition).  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
10574
/* iWMMXt doubleword load/store.  On iWMMXt2 a register-offset
   addressing form exists and is built by hand here; otherwise the
   standard coprocessor address encoder is used.  */

static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Clear the coprocessor-form fields, then rebuild as the
	 unconditional (0xf) register-offset encoding.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
10597
/* WSHUFH: wRd, wRn, plus an 8-bit immediate split with its high
   nibble at bits 20-23 and low nibble at bits 0-3.  */

static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}
10606
10607 static void
10608 do_iwmmxt_wzero (void)
10609 {
10610 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10611 inst.instruction |= inst.operands[0].reg;
10612 inst.instruction |= inst.operands[0].reg << 12;
10613 inst.instruction |= inst.operands[0].reg << 16;
10614 }
10615
10616 static void
10617 do_iwmmxt_wrwrwr_or_imm5 (void)
10618 {
10619 if (inst.operands[2].isreg)
10620 do_rd_rn_rm ();
10621 else {
10622 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
10623 _("immediate operand requires iWMMXt2"));
10624 do_rd_rn ();
10625 if (inst.operands[2].imm == 0)
10626 {
10627 switch ((inst.instruction >> 20) & 0xf)
10628 {
10629 case 4:
10630 case 5:
10631 case 6:
10632 case 7:
10633 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10634 inst.operands[2].imm = 16;
10635 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
10636 break;
10637 case 8:
10638 case 9:
10639 case 10:
10640 case 11:
10641 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10642 inst.operands[2].imm = 32;
10643 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
10644 break;
10645 case 12:
10646 case 13:
10647 case 14:
10648 case 15:
10649 {
10650 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10651 unsigned long wrn;
10652 wrn = (inst.instruction >> 16) & 0xf;
10653 inst.instruction &= 0xff0fff0f;
10654 inst.instruction |= wrn;
10655 /* Bail out here; the instruction is now assembled. */
10656 return;
10657 }
10658 }
10659 }
10660 /* Map 32 -> 0, etc. */
10661 inst.operands[2].imm &= 0x1f;
10662 inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
10663 }
10664 }
10665 \f
/* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
   operations first, then control, shift, and load/store.  */

/* Insns like "foo X,Y,Z": operand 0 in bits 16-19, operand 1 in
   bits 0-3, operand 2 in bits 12-15.  */

static void
do_mav_triple (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Insns like "foo W,X,Y,Z".
   where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */

static void
do_mav_quad (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
}

/* cfmvsc32<cond> DSPSC,MVDX[15:0].
   Only the source register is encoded; DSPSC is implicit.  */
static void
do_mav_dspsc (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
}

/* Maverick shift immediate instructions.
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */

static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.  */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}
10717 \f
/* XScale instructions.  Also sorted arithmetic before move.  */

/* Xscale multiply-accumulate (argument parse)
     MIAcc   acc0,Rm,Rs
     MIAPHcc acc0,Rm,Rs
     MIAxycc acc0,Rm,Rs.
   acc0 is implicit; Rm in bits 0-3, Rs in bits 12-15.  */

static void
do_xsc_mia (void)
{
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Xscale move-accumulator-register (argument parse)

     MARcc   acc0,RdLo,RdHi.  */

static void
do_xsc_mar (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

/* Xscale move-register-accumulator (argument parse)

     MRAcc   RdLo,RdHi,acc0.
   RdLo and RdHi must be distinct registers.  */

static void
do_xsc_mra (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}
10754 \f
/* Encoding functions relevant only to Thumb.  */

/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.  */

static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      /* LSL and ROR by 32 cannot be encoded; LSR/ASR by 32 are
	 represented by a zero amount below.  */
      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      /* Shift type in bits 4-5; amount split across bits 12-14 and
	 bits 6-7.  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10791
10792
/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.  If it is a store insn,
   reject PC in Rn.  */

static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* [Rn, Rm {, LSL #imm}] — register-offset form.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.relocs[0].exp.X_op != O_constant,
		      _("expression too complex"));
	  /* Only LSL #0..#3 fits the two-bit shift field.  */
	  constraint (inst.relocs[0].exp.X_add_number < 0
		      || inst.relocs[0].exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
	}
      inst.relocs[0].type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* [Rn, #imm]{!} — immediate pre-indexed form; the offset is
	 applied via a T32 offset relocation.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* [Rn], #imm — post-indexed form; writeback is implied.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10871
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.
   Each entry is X(mnemonic suffix, 16-bit opcode, 32-bit opcode);
   the X macro is redefined below to extract each column in turn.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bf,    0000, f040e001),			\
  X(_bfcsel,0000, f000e001),			\
  X(_bfx,   0000, f060e001),			\
  X(_bfl,   0000, f000c001),			\
  X(_bflx,  0000, f070e001),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_dls,   0000, f040e001),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_le,    0000, f00fc001),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_wls,   0000, f040c001),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* 16-bit opcodes, indexed by T_MNEM value minus the offset.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Corresponding 32-bit opcodes, same indexing.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10985
/* Thumb instruction encoders, in alphabetical order.  */

/* ADDW or SUBW.  */

static void
do_t_add_sub_w (void)
{
  int Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
     is the SP-{plus,minus}-immediate form of the instruction.  */
  if (Rn == REG_SP)
    constraint (Rd == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rd);

  /* Rn in bits 16-19, Rd in bits 8-11; the 12-bit immediate is
     applied through the T32_IMM12 relocation.  */
  inst.instruction |= (Rn << 16) | (Rd << 8);
  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
}
11008
11009 /* Parse an add or subtract instruction. We get here with inst.instruction
11010 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
11011
11012 static void
11013 do_t_add_sub (void)
11014 {
11015 int Rd, Rs, Rn;
11016
11017 Rd = inst.operands[0].reg;
11018 Rs = (inst.operands[1].present
11019 ? inst.operands[1].reg /* Rd, Rs, foo */
11020 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
11021
11022 if (Rd == REG_PC)
11023 set_pred_insn_type_last ();
11024
11025 if (unified_syntax)
11026 {
11027 bfd_boolean flags;
11028 bfd_boolean narrow;
11029 int opcode;
11030
11031 flags = (inst.instruction == T_MNEM_adds
11032 || inst.instruction == T_MNEM_subs);
11033 if (flags)
11034 narrow = !in_pred_block ();
11035 else
11036 narrow = in_pred_block ();
11037 if (!inst.operands[2].isreg)
11038 {
11039 int add;
11040
11041 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
11042 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
11043
11044 add = (inst.instruction == T_MNEM_add
11045 || inst.instruction == T_MNEM_adds);
11046 opcode = 0;
11047 if (inst.size_req != 4)
11048 {
11049 /* Attempt to use a narrow opcode, with relaxation if
11050 appropriate. */
11051 if (Rd == REG_SP && Rs == REG_SP && !flags)
11052 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
11053 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
11054 opcode = T_MNEM_add_sp;
11055 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
11056 opcode = T_MNEM_add_pc;
11057 else if (Rd <= 7 && Rs <= 7 && narrow)
11058 {
11059 if (flags)
11060 opcode = add ? T_MNEM_addis : T_MNEM_subis;
11061 else
11062 opcode = add ? T_MNEM_addi : T_MNEM_subi;
11063 }
11064 if (opcode)
11065 {
11066 inst.instruction = THUMB_OP16(opcode);
11067 inst.instruction |= (Rd << 4) | Rs;
11068 if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11069 || (inst.relocs[0].type
11070 > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC))
11071 {
11072 if (inst.size_req == 2)
11073 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
11074 else
11075 inst.relax = opcode;
11076 }
11077 }
11078 else
11079 constraint (inst.size_req == 2, BAD_HIREG);
11080 }
11081 if (inst.size_req == 4
11082 || (inst.size_req != 2 && !opcode))
11083 {
11084 constraint ((inst.relocs[0].type
11085 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
11086 && (inst.relocs[0].type
11087 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
11088 THUMB1_RELOC_ONLY);
11089 if (Rd == REG_PC)
11090 {
11091 constraint (add, BAD_PC);
11092 constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
11093 _("only SUBS PC, LR, #const allowed"));
11094 constraint (inst.relocs[0].exp.X_op != O_constant,
11095 _("expression too complex"));
11096 constraint (inst.relocs[0].exp.X_add_number < 0
11097 || inst.relocs[0].exp.X_add_number > 0xff,
11098 _("immediate value out of range"));
11099 inst.instruction = T2_SUBS_PC_LR
11100 | inst.relocs[0].exp.X_add_number;
11101 inst.relocs[0].type = BFD_RELOC_UNUSED;
11102 return;
11103 }
11104 else if (Rs == REG_PC)
11105 {
11106 /* Always use addw/subw. */
11107 inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
11108 inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
11109 }
11110 else
11111 {
11112 inst.instruction = THUMB_OP32 (inst.instruction);
11113 inst.instruction = (inst.instruction & 0xe1ffffff)
11114 | 0x10000000;
11115 if (flags)
11116 inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
11117 else
11118 inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_IMM;
11119 }
11120 inst.instruction |= Rd << 8;
11121 inst.instruction |= Rs << 16;
11122 }
11123 }
11124 else
11125 {
11126 unsigned int value = inst.relocs[0].exp.X_add_number;
11127 unsigned int shift = inst.operands[2].shift_kind;
11128
11129 Rn = inst.operands[2].reg;
11130 /* See if we can do this with a 16-bit instruction. */
11131 if (!inst.operands[2].shifted && inst.size_req != 4)
11132 {
11133 if (Rd > 7 || Rs > 7 || Rn > 7)
11134 narrow = FALSE;
11135
11136 if (narrow)
11137 {
11138 inst.instruction = ((inst.instruction == T_MNEM_adds
11139 || inst.instruction == T_MNEM_add)
11140 ? T_OPCODE_ADD_R3
11141 : T_OPCODE_SUB_R3);
11142 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
11143 return;
11144 }
11145
11146 if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
11147 {
11148 /* Thumb-1 cores (except v6-M) require at least one high
11149 register in a narrow non flag setting add. */
11150 if (Rd > 7 || Rn > 7
11151 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
11152 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
11153 {
11154 if (Rd == Rn)
11155 {
11156 Rn = Rs;
11157 Rs = Rd;
11158 }
11159 inst.instruction = T_OPCODE_ADD_HI;
11160 inst.instruction |= (Rd & 8) << 4;
11161 inst.instruction |= (Rd & 7);
11162 inst.instruction |= Rn << 3;
11163 return;
11164 }
11165 }
11166 }
11167
11168 constraint (Rd == REG_PC, BAD_PC);
11169 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
11170 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
11171 constraint (Rs == REG_PC, BAD_PC);
11172 reject_bad_reg (Rn);
11173
11174 /* If we get here, it can't be done in 16 bits. */
11175 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
11176 _("shift must be constant"));
11177 inst.instruction = THUMB_OP32 (inst.instruction);
11178 inst.instruction |= Rd << 8;
11179 inst.instruction |= Rs << 16;
11180 constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
11181 _("shift value over 3 not allowed in thumb mode"));
11182 constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
11183 _("only LSL shift allowed in thumb mode"));
11184 encode_thumb32_shifted_operand (2);
11185 }
11186 }
11187 else
11188 {
11189 constraint (inst.instruction == T_MNEM_adds
11190 || inst.instruction == T_MNEM_subs,
11191 BAD_THUMB32);
11192
11193 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
11194 {
11195 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
11196 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
11197 BAD_HIREG);
11198
11199 inst.instruction = (inst.instruction == T_MNEM_add
11200 ? 0x0000 : 0x8000);
11201 inst.instruction |= (Rd << 4) | Rs;
11202 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
11203 return;
11204 }
11205
11206 Rn = inst.operands[2].reg;
11207 constraint (inst.operands[2].shifted, _("unshifted register required"));
11208
11209 /* We now have Rd, Rs, and Rn set to registers. */
11210 if (Rd > 7 || Rs > 7 || Rn > 7)
11211 {
11212 /* Can't do this for SUB. */
11213 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
11214 inst.instruction = T_OPCODE_ADD_HI;
11215 inst.instruction |= (Rd & 8) << 4;
11216 inst.instruction |= (Rd & 7);
11217 if (Rs == Rd)
11218 inst.instruction |= Rn << 3;
11219 else if (Rn == Rd)
11220 inst.instruction |= Rs << 3;
11221 else
11222 constraint (1, _("dest must overlap one source register"));
11223 }
11224 else
11225 {
11226 inst.instruction = (inst.instruction == T_MNEM_add
11227 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
11228 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
11229 }
11230 }
11231 }
11232
/* Encode the Thumb ADR pseudo-instruction (load a PC-relative address
   into Rd).  Chooses between deferring to section relaxation, a 32-bit
   encoding, or a forced 16-bit encoding, and installs the matching
   PC-relative relocation.  */
static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.relocs[0].pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
      inst.relocs[0].exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.relocs[0].pc_rel = 1;
      inst.instruction |= Rd << 4;
    }

  /* When the target is a defined Thumb function, bias the address by 1
     so the low (Thumb) bit is set in the computed value.  */
  if (inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number += 1;
}
11272
11273 /* Arithmetic instructions for which there is just one 16-bit
11274 instruction encoding, and it allows only two low registers.
11275 For maximal compatibility with ARM syntax, we allow three register
11276 operands even when Thumb-32 instructions are not available, as long
11277 as the first two are identical. For instance, both "sbc r0,r1" and
11278 "sbc r0,r0,r1" are allowed. */
/* Encode a non-commutative three-operand Thumb arithmetic instruction
   (e.g. SBC): prefers a 16-bit encoding when registers, predication
   state and size request allow, otherwise falls back to the 32-bit
   form.  See the comment above for the two-operand shorthand.  */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg /* Rd, Rs, foo */
	: inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.
	     The narrow (flag-setting) form is only usable outside a
	     predication block; the non-flag-setting form only inside
	     one.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* Narrow form requires the destination to be the first
	     source.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
11361
11362 /* Similarly, but for instructions where the arithmetic operation is
11363 commutative, so we can allow either of them to be different from
11364 the destination operand in a 16-bit instruction. For instance, all
11365 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11366 accepted. */
/* Encode a commutative three-operand Thumb arithmetic instruction
   (e.g. ADC).  Like do_t_arit3, but because the operation commutes the
   16-bit form may be used when the destination matches EITHER source
   (see the comment above).  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg /* Rd, Rs, foo */
	: inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.
	     The flag-setting narrow form is only valid outside a
	     predication block, the non-flag-setting one only inside.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutative: Rd may overlap either source operand.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
11462
11463 static void
11464 do_t_bfc (void)
11465 {
11466 unsigned Rd;
11467 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
11468 constraint (msb > 32, _("bit-field extends past end of register"));
11469 /* The instruction encoding stores the LSB and MSB,
11470 not the LSB and width. */
11471 Rd = inst.operands[0].reg;
11472 reject_bad_reg (Rd);
11473 inst.instruction |= Rd << 8;
11474 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
11475 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
11476 inst.instruction |= msb - 1;
11477 }
11478
11479 static void
11480 do_t_bfi (void)
11481 {
11482 int Rd, Rn;
11483 unsigned int msb;
11484
11485 Rd = inst.operands[0].reg;
11486 reject_bad_reg (Rd);
11487
11488 /* #0 in second position is alternative syntax for bfc, which is
11489 the same instruction but with REG_PC in the Rm field. */
11490 if (!inst.operands[1].isreg)
11491 Rn = REG_PC;
11492 else
11493 {
11494 Rn = inst.operands[1].reg;
11495 reject_bad_reg (Rn);
11496 }
11497
11498 msb = inst.operands[2].imm + inst.operands[3].imm;
11499 constraint (msb > 32, _("bit-field extends past end of register"));
11500 /* The instruction encoding stores the LSB and MSB,
11501 not the LSB and width. */
11502 inst.instruction |= Rd << 8;
11503 inst.instruction |= Rn << 16;
11504 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11505 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11506 inst.instruction |= msb - 1;
11507 }
11508
11509 static void
11510 do_t_bfx (void)
11511 {
11512 unsigned Rd, Rn;
11513
11514 Rd = inst.operands[0].reg;
11515 Rn = inst.operands[1].reg;
11516
11517 reject_bad_reg (Rd);
11518 reject_bad_reg (Rn);
11519
11520 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
11521 _("bit-field extends past end of register"));
11522 inst.instruction |= Rd << 8;
11523 inst.instruction |= Rn << 16;
11524 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11525 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11526 inst.instruction |= inst.operands[3].imm - 1;
11527 }
11528
11529 /* ARM V5 Thumb BLX (argument parse)
11530 BLX <target_addr> which is BLX(1)
11531 BLX <Rm> which is BLX(2)
11532 Unfortunately, there are two different opcodes for this mnemonic.
11533 So, the insns[].value is not used, and the code here zaps values
11534 into inst.instruction.
11535
11536 ??? How to take advantage of the additional two bits of displacement
11537 available in Thumb32 mode? Need new relocation? */
11538
11539 static void
11540 do_t_blx (void)
11541 {
11542 set_pred_insn_type_last ();
11543
11544 if (inst.operands[0].isreg)
11545 {
11546 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
11547 /* We have a register, so this is BLX(2). */
11548 inst.instruction |= inst.operands[0].reg << 3;
11549 }
11550 else
11551 {
11552 /* No register. This must be BLX(1). */
11553 inst.instruction = 0xf000e800;
11554 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
11555 }
11556 }
11557
/* Encode a Thumb B/Bcc branch.  Selects the 16- or 32-bit encoding
   (or defers to relaxation) and the matching PC-relative reloc.  */
static void
do_t_branch (void)
{
  int opcode;
  int cond;
  bfd_reloc_code_real_type reloc;

  cond = inst.cond;
  set_pred_insn_type (IF_INSIDE_IT_LAST_INSN);

  if (in_pred_block ())
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
	 branches.  */
      cond = COND_ALWAYS;
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  /* Use a 32-bit encoding if one was explicitly requested, or the
     target is a known constant/reloc and a 16-bit form was not
     forced.  */
  if (unified_syntax
      && (inst.size_req == 4
	  || (inst.size_req != 2
	      && (inst.operands[0].hasreloc
		  || inst.relocs[0].exp.X_op == O_constant))))
    {
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
		      _("selected architecture does not support "
			"wide conditional branch instruction"));

	  gas_assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }
  inst.relocs[0].type = reloc;
  inst.relocs[0].pc_rel = 1;
}
11619
11620 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11621 between the two is the maximum immediate allowed - which is passed in
11622 RANGE. */
11623 static void
11624 do_t_bkpt_hlt1 (int range)
11625 {
11626 constraint (inst.cond != COND_ALWAYS,
11627 _("instruction is always unconditional"));
11628 if (inst.operands[0].present)
11629 {
11630 constraint (inst.operands[0].imm > range,
11631 _("immediate value out of range"));
11632 inst.instruction |= inst.operands[0].imm;
11633 }
11634
11635 set_pred_insn_type (NEUTRAL_IT_INSN);
11636 }
11637
/* Thumb HLT: immediate limited to 0..63.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
11643
/* Thumb BKPT: immediate limited to 0..255.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11649
11650 static void
11651 do_t_branch23 (void)
11652 {
11653 set_pred_insn_type_last ();
11654 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);
11655
11656 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11657 this file. We used to simply ignore the PLT reloc type here --
11658 the branch encoding is now needed to deal with TLSCALL relocs.
11659 So if we see a PLT reloc now, put it back to how it used to be to
11660 keep the preexisting behaviour. */
11661 if (inst.relocs[0].type == BFD_RELOC_ARM_PLT32)
11662 inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH23;
11663
11664 #if defined(OBJ_COFF)
11665 /* If the destination of the branch is a defined symbol which does not have
11666 the THUMB_FUNC attribute, then we must be calling a function which has
11667 the (interfacearm) attribute. We look for the Thumb entry point to that
11668 function and change the branch to refer to that function instead. */
11669 if ( inst.relocs[0].exp.X_op == O_symbol
11670 && inst.relocs[0].exp.X_add_symbol != NULL
11671 && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
11672 && ! THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
11673 inst.relocs[0].exp.X_add_symbol
11674 = find_real_start (inst.relocs[0].exp.X_add_symbol);
11675 #endif
11676 }
11677
11678 static void
11679 do_t_bx (void)
11680 {
11681 set_pred_insn_type_last ();
11682 inst.instruction |= inst.operands[0].reg << 3;
11683 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11684 should cause the alignment to be checked once it is known. This is
11685 because BX PC only works if the instruction is word aligned. */
11686 }
11687
11688 static void
11689 do_t_bxj (void)
11690 {
11691 int Rm;
11692
11693 set_pred_insn_type_last ();
11694 Rm = inst.operands[0].reg;
11695 reject_bad_reg (Rm);
11696 inst.instruction |= Rm << 16;
11697 }
11698
11699 static void
11700 do_t_clz (void)
11701 {
11702 unsigned Rd;
11703 unsigned Rm;
11704
11705 Rd = inst.operands[0].reg;
11706 Rm = inst.operands[1].reg;
11707
11708 reject_bad_reg (Rd);
11709 reject_bad_reg (Rm);
11710
11711 inst.instruction |= Rd << 8;
11712 inst.instruction |= Rm << 16;
11713 inst.instruction |= Rm;
11714 }
11715
11716 static void
11717 do_t_csdb (void)
11718 {
11719 set_pred_insn_type (OUTSIDE_PRED_INSN);
11720 }
11721
11722 static void
11723 do_t_cps (void)
11724 {
11725 set_pred_insn_type (OUTSIDE_PRED_INSN);
11726 inst.instruction |= inst.operands[0].imm;
11727 }
11728
11729 static void
11730 do_t_cpsi (void)
11731 {
11732 set_pred_insn_type (OUTSIDE_PRED_INSN);
11733 if (unified_syntax
11734 && (inst.operands[1].present || inst.size_req == 4)
11735 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
11736 {
11737 unsigned int imod = (inst.instruction & 0x0030) >> 4;
11738 inst.instruction = 0xf3af8000;
11739 inst.instruction |= imod << 9;
11740 inst.instruction |= inst.operands[0].imm << 5;
11741 if (inst.operands[1].present)
11742 inst.instruction |= 0x100 | inst.operands[1].imm;
11743 }
11744 else
11745 {
11746 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
11747 && (inst.operands[0].imm & 4),
11748 _("selected processor does not support 'A' form "
11749 "of this instruction"));
11750 constraint (inst.operands[1].present || inst.size_req == 4,
11751 _("Thumb does not support the 2-argument "
11752 "form of this instruction"));
11753 inst.instruction |= inst.operands[0].imm;
11754 }
11755 }
11756
11757 /* THUMB CPY instruction (argument parse). */
11758
11759 static void
11760 do_t_cpy (void)
11761 {
11762 if (inst.size_req == 4)
11763 {
11764 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11765 inst.instruction |= inst.operands[0].reg << 8;
11766 inst.instruction |= inst.operands[1].reg;
11767 }
11768 else
11769 {
11770 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11771 inst.instruction |= (inst.operands[0].reg & 0x7);
11772 inst.instruction |= inst.operands[1].reg << 3;
11773 }
11774 }
11775
11776 static void
11777 do_t_cbz (void)
11778 {
11779 set_pred_insn_type (OUTSIDE_PRED_INSN);
11780 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11781 inst.instruction |= inst.operands[0].reg;
11782 inst.relocs[0].pc_rel = 1;
11783 inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11784 }
11785
11786 static void
11787 do_t_dbg (void)
11788 {
11789 inst.instruction |= inst.operands[0].imm;
11790 }
11791
11792 static void
11793 do_t_div (void)
11794 {
11795 unsigned Rd, Rn, Rm;
11796
11797 Rd = inst.operands[0].reg;
11798 Rn = (inst.operands[1].present
11799 ? inst.operands[1].reg : Rd);
11800 Rm = inst.operands[2].reg;
11801
11802 reject_bad_reg (Rd);
11803 reject_bad_reg (Rn);
11804 reject_bad_reg (Rm);
11805
11806 inst.instruction |= Rd << 8;
11807 inst.instruction |= Rn << 16;
11808 inst.instruction |= Rm;
11809 }
11810
11811 static void
11812 do_t_hint (void)
11813 {
11814 if (unified_syntax && inst.size_req == 4)
11815 inst.instruction = THUMB_OP32 (inst.instruction);
11816 else
11817 inst.instruction = THUMB_OP16 (inst.instruction);
11818 }
11819
/* Encode the Thumb IT instruction and record the predication state for
   the instructions that follow.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_pred_insn_type (IT_INSN);
  now_pred.mask = (inst.instruction & 0xf) | 0x10;
  now_pred.cc = cond;
  now_pred.warn_deprecated = FALSE;
  now_pred.type = SCALAR_PRED;

  /* If the condition is a negative condition, invert the mask.
     The mask was parsed as if the condition bit 0 were 1; the number
     of trailing zero bits in the mask gives the block length, and the
     bits above the lowest set bit are flipped so each T/E position
     matches the inverted condition.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_pred.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_pred.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_pred.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_pred.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
11863
11864 static void
11865 do_mve_vpt (void)
11866 {
11867 /* We are dealing with a vector predicated block. */
11868 set_pred_insn_type (VPT_INSN);
11869 now_pred.cc = 0;
11870 now_pred.mask = ((inst.instruction & 0x00400000) >> 19)
11871 | ((inst.instruction & 0xe000) >> 13);
11872 now_pred.warn_deprecated = FALSE;
11873 now_pred.type = VECTOR_PRED;
11874 }
11875
11876 /* Helper function used for both push/pop and ldm/stm. */
/* Helper function used for both push/pop and ldm/stm.
   DO_IO is true for real load/store multiples (false for e.g. CLRM-style
   uses with no base register); BASE is the base register (-1 when
   absent); MASK is the register list bitmask; WRITEBACK selects base
   writeback.  Validates the register list and degrades single-register
   transfers to str/ldr.  */
static void
encode_thumb2_multi (bfd_boolean do_io, int base, unsigned mask,
		     bfd_boolean writeback)
{
  bfd_boolean load, store;

  gas_assert (base != -1 || !do_io);
  /* Bit 20 of the opcode distinguishes load from store.  */
  load = do_io && ((inst.instruction & (1 << 20)) != 0);
  store = do_io && !load;

  if (mask & (1 << 13))
    inst.error =  _("SP not allowed in register list");

  if (do_io && (mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC ends the predication block.  */
	    set_pred_insn_type_last ();
	}
    }
  else if (store)
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  /* A power-of-two mask means exactly one register.  */
  if (do_io && ((mask & (mask - 1)) == 0))
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Re-purpose MASK as the Rt field of the str/ldr.  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  if (do_io)
    inst.instruction |= base << 16;
}
11943
/* Encode Thumb LDM/STM (and the DB variants).  Under unified syntax
   this tries several 16-bit degradations (plain ldmia/stmia, single
   register as ldr/str, SP forms as push/pop or ldr/str with SP) before
   falling back to the 32-bit encoding.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  Only ia forms with a
	 register list confined to r0-r7 are candidates.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* stmia needs writeback; ldmia needs writeback exactly
		 when the base is not in the register list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP base: degrade to push/pop, or to ldr/str with SP
		 base when there is exactly one register and no
		 writeback.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  /* 32-bit fallback.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_multi (TRUE /* do_io */, inst.operands[0].reg,
			       inst.operands[1].imm,
			       inst.operands[0].writeback);
	}
    }
  else
    {
      /* Pre-unified syntax: only 16-bit ldmia/stmia are available.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
12072
12073 static void
12074 do_t_ldrex (void)
12075 {
12076 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
12077 || inst.operands[1].postind || inst.operands[1].writeback
12078 || inst.operands[1].immisreg || inst.operands[1].shifted
12079 || inst.operands[1].negative,
12080 BAD_ADDR_MODE);
12081
12082 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
12083
12084 inst.instruction |= inst.operands[0].reg << 12;
12085 inst.instruction |= inst.operands[1].reg << 16;
12086 inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
12087 }
12088
12089 static void
12090 do_t_ldrexd (void)
12091 {
12092 if (!inst.operands[1].present)
12093 {
12094 constraint (inst.operands[0].reg == REG_LR,
12095 _("r14 not allowed as first register "
12096 "when second register is omitted"));
12097 inst.operands[1].reg = inst.operands[0].reg + 1;
12098 }
12099 constraint (inst.operands[0].reg == inst.operands[1].reg,
12100 BAD_OVERLAP);
12101
12102 inst.instruction |= inst.operands[0].reg << 12;
12103 inst.instruction |= inst.operands[1].reg << 8;
12104 inst.instruction |= inst.operands[2].reg << 16;
12105 }
12106
12107 static void
12108 do_t_ldst (void)
12109 {
12110 unsigned long opcode;
12111 int Rn;
12112
12113 if (inst.operands[0].isreg
12114 && !inst.operands[0].preind
12115 && inst.operands[0].reg == REG_PC)
12116 set_pred_insn_type_last ();
12117
12118 opcode = inst.instruction;
12119 if (unified_syntax)
12120 {
12121 if (!inst.operands[1].isreg)
12122 {
12123 if (opcode <= 0xffff)
12124 inst.instruction = THUMB_OP32 (opcode);
12125 if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
12126 return;
12127 }
12128 if (inst.operands[1].isreg
12129 && !inst.operands[1].writeback
12130 && !inst.operands[1].shifted && !inst.operands[1].postind
12131 && !inst.operands[1].negative && inst.operands[0].reg <= 7
12132 && opcode <= 0xffff
12133 && inst.size_req != 4)
12134 {
12135 /* Insn may have a 16-bit form. */
12136 Rn = inst.operands[1].reg;
12137 if (inst.operands[1].immisreg)
12138 {
12139 inst.instruction = THUMB_OP16 (opcode);
12140 /* [Rn, Rik] */
12141 if (Rn <= 7 && inst.operands[1].imm <= 7)
12142 goto op16;
12143 else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
12144 reject_bad_reg (inst.operands[1].imm);
12145 }
12146 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
12147 && opcode != T_MNEM_ldrsb)
12148 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
12149 || (Rn == REG_SP && opcode == T_MNEM_str))
12150 {
12151 /* [Rn, #const] */
12152 if (Rn > 7)
12153 {
12154 if (Rn == REG_PC)
12155 {
12156 if (inst.relocs[0].pc_rel)
12157 opcode = T_MNEM_ldr_pc2;
12158 else
12159 opcode = T_MNEM_ldr_pc;
12160 }
12161 else
12162 {
12163 if (opcode == T_MNEM_ldr)
12164 opcode = T_MNEM_ldr_sp;
12165 else
12166 opcode = T_MNEM_str_sp;
12167 }
12168 inst.instruction = inst.operands[0].reg << 8;
12169 }
12170 else
12171 {
12172 inst.instruction = inst.operands[0].reg;
12173 inst.instruction |= inst.operands[1].reg << 3;
12174 }
12175 inst.instruction |= THUMB_OP16 (opcode);
12176 if (inst.size_req == 2)
12177 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
12178 else
12179 inst.relax = opcode;
12180 return;
12181 }
12182 }
12183 /* Definitely a 32-bit variant. */
12184
12185 /* Warning for Erratum 752419. */
12186 if (opcode == T_MNEM_ldr
12187 && inst.operands[0].reg == REG_SP
12188 && inst.operands[1].writeback == 1
12189 && !inst.operands[1].immisreg)
12190 {
12191 if (no_cpu_selected ()
12192 || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
12193 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
12194 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
12195 as_warn (_("This instruction may be unpredictable "
12196 "if executed on M-profile cores "
12197 "with interrupts enabled."));
12198 }
12199
12200 /* Do some validations regarding addressing modes. */
12201 if (inst.operands[1].immisreg)
12202 reject_bad_reg (inst.operands[1].imm);
12203
12204 constraint (inst.operands[1].writeback == 1
12205 && inst.operands[0].reg == inst.operands[1].reg,
12206 BAD_OVERLAP);
12207
12208 inst.instruction = THUMB_OP32 (opcode);
12209 inst.instruction |= inst.operands[0].reg << 12;
12210 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
12211 check_ldr_r15_aligned ();
12212 return;
12213 }
12214
12215 constraint (inst.operands[0].reg > 7, BAD_HIREG);
12216
12217 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
12218 {
12219 /* Only [Rn,Rm] is acceptable. */
12220 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
12221 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
12222 || inst.operands[1].postind || inst.operands[1].shifted
12223 || inst.operands[1].negative,
12224 _("Thumb does not support this addressing mode"));
12225 inst.instruction = THUMB_OP16 (inst.instruction);
12226 goto op16;
12227 }
12228
12229 inst.instruction = THUMB_OP16 (inst.instruction);
12230 if (!inst.operands[1].isreg)
12231 if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
12232 return;
12233
12234 constraint (!inst.operands[1].preind
12235 || inst.operands[1].shifted
12236 || inst.operands[1].writeback,
12237 _("Thumb does not support this addressing mode"));
12238 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
12239 {
12240 constraint (inst.instruction & 0x0600,
12241 _("byte or halfword not valid for base register"));
12242 constraint (inst.operands[1].reg == REG_PC
12243 && !(inst.instruction & THUMB_LOAD_BIT),
12244 _("r15 based store not allowed"));
12245 constraint (inst.operands[1].immisreg,
12246 _("invalid base register for register offset"));
12247
12248 if (inst.operands[1].reg == REG_PC)
12249 inst.instruction = T_OPCODE_LDR_PC;
12250 else if (inst.instruction & THUMB_LOAD_BIT)
12251 inst.instruction = T_OPCODE_LDR_SP;
12252 else
12253 inst.instruction = T_OPCODE_STR_SP;
12254
12255 inst.instruction |= inst.operands[0].reg << 8;
12256 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
12257 return;
12258 }
12259
12260 constraint (inst.operands[1].reg > 7, BAD_HIREG);
12261 if (!inst.operands[1].immisreg)
12262 {
12263 /* Immediate offset. */
12264 inst.instruction |= inst.operands[0].reg;
12265 inst.instruction |= inst.operands[1].reg << 3;
12266 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
12267 return;
12268 }
12269
12270 /* Register offset. */
12271 constraint (inst.operands[1].imm > 7, BAD_HIREG);
12272 constraint (inst.operands[1].negative,
12273 _("Thumb does not support this addressing mode"));
12274
12275 op16:
12276 switch (inst.instruction)
12277 {
12278 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
12279 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
12280 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
12281 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
12282 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
12283 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
12284 case 0x5600 /* ldrsb */:
12285 case 0x5e00 /* ldrsh */: break;
12286 default: abort ();
12287 }
12288
12289 inst.instruction |= inst.operands[0].reg;
12290 inst.instruction |= inst.operands[1].reg << 3;
12291 inst.instruction |= inst.operands[1].imm << 6;
12292 }
12293
/* Encode Thumb-2 LDRD/STRD (load/store doubleword).  When only one
   transfer register was written, the second defaults to Rt + 1; in that
   case r14 and r12 are rejected as Rt (Rt2 would be r15, or clash with
   the r12/r14 restriction).  A written-back base that overlaps either
   transfer register is UNPREDICTABLE, so warn about it.  */
static void
do_t_ldstd (void)
{
  if (!inst.operands[1].present)
    {
      /* Rt2 defaults to the register following Rt.  */
      inst.operands[1].reg = inst.operands[0].reg + 1;
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed here"));
      constraint (inst.operands[0].reg == REG_R12,
		  _("r12 not allowed here"));
    }

  if (inst.operands[2].writeback
      && (inst.operands[0].reg == inst.operands[2].reg
	  || inst.operands[1].reg == inst.operands[2].reg))
    as_warn (_("base register written back, and overlaps "
	       "one of transfer registers"));

  /* Rt in bits 12-15, Rt2 in bits 8-11; then the dual addressing mode.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
}
12316
12317 static void
12318 do_t_ldstt (void)
12319 {
12320 inst.instruction |= inst.operands[0].reg << 12;
12321 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
12322 }
12323
12324 static void
12325 do_t_mla (void)
12326 {
12327 unsigned Rd, Rn, Rm, Ra;
12328
12329 Rd = inst.operands[0].reg;
12330 Rn = inst.operands[1].reg;
12331 Rm = inst.operands[2].reg;
12332 Ra = inst.operands[3].reg;
12333
12334 reject_bad_reg (Rd);
12335 reject_bad_reg (Rn);
12336 reject_bad_reg (Rm);
12337 reject_bad_reg (Ra);
12338
12339 inst.instruction |= Rd << 8;
12340 inst.instruction |= Rn << 16;
12341 inst.instruction |= Rm;
12342 inst.instruction |= Ra << 12;
12343 }
12344
12345 static void
12346 do_t_mlal (void)
12347 {
12348 unsigned RdLo, RdHi, Rn, Rm;
12349
12350 RdLo = inst.operands[0].reg;
12351 RdHi = inst.operands[1].reg;
12352 Rn = inst.operands[2].reg;
12353 Rm = inst.operands[3].reg;
12354
12355 reject_bad_reg (RdLo);
12356 reject_bad_reg (RdHi);
12357 reject_bad_reg (Rn);
12358 reject_bad_reg (Rm);
12359
12360 inst.instruction |= RdLo << 12;
12361 inst.instruction |= RdHi << 8;
12362 inst.instruction |= Rn << 16;
12363 inst.instruction |= Rm;
12364 }
12365
/* Encode Thumb MOV, MOVS and CMP with a register or immediate second
   operand.  In unified syntax this chooses between the 16-bit and
   32-bit encodings (honouring .n/.w size requests, IT-block
   predication and low-register restrictions) and rewrites the special
   cases: MOVS PC, LR becomes SUBS PC, LR, #0; register-shifted MOV{S}
   becomes the corresponding shift instruction; MOVS Rd, Rm of two low
   registers becomes LSLS Rd, Rm, #0.  In divided (pre-UAL) syntax only
   the classic Thumb-1 forms are accepted.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* Writing PC ends any IT block: this must be the last insn in it.  */
  if (Rn == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      /* MOV{S} put the destination in bits 8-11 of the T32 encoding;
	 CMP puts its first operand in bits 16-19.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      /* Inside an IT block the 16-bit MOVS is unavailable (it always
	 sets flags); outside, MOVS needs low registers to narrow.  */
      if (in_pred_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				   "deprecated when r%u is the destination "
				   "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		    constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_pred_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		  || inst.relocs[0].type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		{
		  /* No Thumb-1-only relocation requested: either fix the
		     size now or let relaxation widen it later.  */
		  if (inst.size_req == 2)
		    inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
		  else
		    inst.relax = opcode;
		}
	    }
	  else
	    {
	      /* The :lower0_7:-style group relocations only exist for
		 the 16-bit encoding.  */
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_pred_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  /* The 16-bit shift-by-register forms have Rd == Rn.  */
	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_pred_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		/* ROR-by-immediate has no 16-bit encoding.  */
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Divided (pre-UAL) syntax: 16-bit encodings only.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
12665
/* Encode Thumb-2 MOVW/MOVT (move 16-bit immediate).  Bit 0x00800000 of
   the opcode template distinguishes MOVT ("top" half) from MOVW.
   :lower16:/:upper16: relocations are converted to their Thumb
   counterparts and checked against the variant in use.  */
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  top = (inst.instruction & 0x00800000) != 0;
  if (inst.relocs[0].type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.relocs[0].type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      /* Constant with no relocation: scatter the 16-bit value into the
	 T32 imm4:i:imm3:imm8 fields.  */
      imm = inst.relocs[0].exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
12698
/* Encode Thumb MVN/MVNS/TST/CMN (CMP also routes here for some forms):
   a destination or compare register plus a register, shifted register
   or modified-immediate operand.  Selects 16- vs 32-bit encodings in
   unified syntax; divided syntax allows only the 16-bit register
   forms.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* MVN{S} put Rd in bits 8-11; the compare/test forms put their
	 first operand in bits 16-19.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      /* An opcode value > 0xffff means the table entry is already a
	 32-bit encoding.  */
      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12778
/* Encode Thumb MRS (read status register into a general register).
   Tries the VFP VMRS alias first, then handles the banked-register
   form, the M-profile special-purpose register form and the classic
   APSR/CPSR/SPSR form.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register: the parsed value packs SYSm and related bits;
	 unpack them into the instruction fields.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
	      "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12826
/* Encode Thumb MSR (write a general register to a status register).
   Tries the VFP VMSR alias first.  The mask/flags come either from a
   banked-register operand or from a parsed PSR-field immediate; which
   fields are legal depends on whether an M-profile CPU (and its DSP
   extension) is selected.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* Scatter the flag/mask bits into their instruction fields, then the
     source register into bits 16-19.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12873
/* Encode Thumb MUL/MULS.  Two-operand syntax duplicates Rd as the
   second source.  The 16-bit form requires low registers and the
   destination to overlap one source; MULS can never be 32-bit (the
   Thumb-2 MUL does not set flags).  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      /* Rd must equal one source; the other goes in bits 3-5.  */
      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
12936
12937 static void
12938 do_t_mull (void)
12939 {
12940 unsigned RdLo, RdHi, Rn, Rm;
12941
12942 RdLo = inst.operands[0].reg;
12943 RdHi = inst.operands[1].reg;
12944 Rn = inst.operands[2].reg;
12945 Rm = inst.operands[3].reg;
12946
12947 reject_bad_reg (RdLo);
12948 reject_bad_reg (RdHi);
12949 reject_bad_reg (Rn);
12950 reject_bad_reg (Rm);
12951
12952 inst.instruction |= RdLo << 12;
12953 inst.instruction |= RdHi << 8;
12954 inst.instruction |= Rn << 16;
12955 inst.instruction |= Rm;
12956
12957 if (RdLo == RdHi)
12958 as_tsktsk (_("rdhi and rdlo must be different"));
12959 }
12960
/* Encode Thumb NOP and the hint instructions sharing its entry.
   Hints needing .w or an operand > 15 take the 32-bit encoding;
   otherwise the 16-bit hint encoding is used when Thumb-2 is
   available, falling back to 0x46c0 (the traditional Thumb-1 NOP)
   on older cores.  */
static void
do_t_nop (void)
{
  set_pred_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
12993
12994 static void
12995 do_t_neg (void)
12996 {
12997 if (unified_syntax)
12998 {
12999 bfd_boolean narrow;
13000
13001 if (THUMB_SETS_FLAGS (inst.instruction))
13002 narrow = !in_pred_block ();
13003 else
13004 narrow = in_pred_block ();
13005 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
13006 narrow = FALSE;
13007 if (inst.size_req == 4)
13008 narrow = FALSE;
13009
13010 if (!narrow)
13011 {
13012 inst.instruction = THUMB_OP32 (inst.instruction);
13013 inst.instruction |= inst.operands[0].reg << 8;
13014 inst.instruction |= inst.operands[1].reg << 16;
13015 }
13016 else
13017 {
13018 inst.instruction = THUMB_OP16 (inst.instruction);
13019 inst.instruction |= inst.operands[0].reg;
13020 inst.instruction |= inst.operands[1].reg << 3;
13021 }
13022 }
13023 else
13024 {
13025 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
13026 BAD_HIREG);
13027 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
13028
13029 inst.instruction = THUMB_OP16 (inst.instruction);
13030 inst.instruction |= inst.operands[0].reg;
13031 inst.instruction |= inst.operands[1].reg << 3;
13032 }
13033 }
13034
13035 static void
13036 do_t_orn (void)
13037 {
13038 unsigned Rd, Rn;
13039
13040 Rd = inst.operands[0].reg;
13041 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
13042
13043 reject_bad_reg (Rd);
13044 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
13045 reject_bad_reg (Rn);
13046
13047 inst.instruction |= Rd << 8;
13048 inst.instruction |= Rn << 16;
13049
13050 if (!inst.operands[2].isreg)
13051 {
13052 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
13053 inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
13054 }
13055 else
13056 {
13057 unsigned Rm;
13058
13059 Rm = inst.operands[2].reg;
13060 reject_bad_reg (Rm);
13061
13062 constraint (inst.operands[2].shifted
13063 && inst.operands[2].immisreg,
13064 _("shift must be constant"));
13065 encode_thumb32_shifted_operand (2);
13066 }
13067 }
13068
13069 static void
13070 do_t_pkhbt (void)
13071 {
13072 unsigned Rd, Rn, Rm;
13073
13074 Rd = inst.operands[0].reg;
13075 Rn = inst.operands[1].reg;
13076 Rm = inst.operands[2].reg;
13077
13078 reject_bad_reg (Rd);
13079 reject_bad_reg (Rn);
13080 reject_bad_reg (Rm);
13081
13082 inst.instruction |= Rd << 8;
13083 inst.instruction |= Rn << 16;
13084 inst.instruction |= Rm;
13085 if (inst.operands[3].present)
13086 {
13087 unsigned int val = inst.relocs[0].exp.X_add_number;
13088 constraint (inst.relocs[0].exp.X_op != O_constant,
13089 _("expression too complex"));
13090 inst.instruction |= (val & 0x1c) << 10;
13091 inst.instruction |= (val & 0x03) << 6;
13092 }
13093 }
13094
/* Encode Thumb-2 PKHTB.  Without an explicit shift, PKHTB Rd, Rn, Rm
   is the same operation as PKHBT with the source registers swapped
   (PR 10168), so clear the tb-select bit and delegate to do_t_pkhbt.  */
static void
do_t_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      unsigned Rtmp;

      inst.instruction &= ~0x00000020;

      /* PR 10168.  Swap the Rm and Rn registers.  */
      Rtmp = inst.operands[1].reg;
      inst.operands[1].reg = inst.operands[2].reg;
      inst.operands[2].reg = Rtmp;
    }
  do_t_pkhbt ();
}
13111
/* Encode Thumb-2 preload hints (PLD/PLDW/PLI): reject SP/PC as a
   register offset, then emit the plain (non-t, non-dual) T32
   addressing mode for operand 0.  */
static void
do_t_pld (void)
{
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
13120
/* Encode Thumb PUSH/POP.  Prefers 16-bit encodings: a pure low-register
   mask, or low registers plus LR (for push) / PC (for pop).  Anything
   else requires the 32-bit multiple-transfer form, which is only
   available in unified syntax.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus exactly LR (push) or PC (pop): still 16-bit,
	 with the extra register flagged by the PC/LR bit.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_multi (TRUE /* do_io */, 13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
13153
13154 static void
13155 do_t_clrm (void)
13156 {
13157 if (unified_syntax)
13158 encode_thumb2_multi (FALSE /* do_io */, -1, inst.operands[0].imm, FALSE);
13159 else
13160 {
13161 inst.error = _("invalid register list to push/pop instruction");
13162 return;
13163 }
13164 }
13165
13166 static void
13167 do_t_vscclrm (void)
13168 {
13169 if (inst.operands[0].issingle)
13170 {
13171 inst.instruction |= (inst.operands[0].reg & 0x1) << 22;
13172 inst.instruction |= (inst.operands[0].reg & 0x1e) << 11;
13173 inst.instruction |= inst.operands[0].imm;
13174 }
13175 else
13176 {
13177 inst.instruction |= (inst.operands[0].reg & 0x10) << 18;
13178 inst.instruction |= (inst.operands[0].reg & 0xf) << 12;
13179 inst.instruction |= 1 << 8;
13180 inst.instruction |= inst.operands[0].imm << 1;
13181 }
13182 }
13183
13184 static void
13185 do_t_rbit (void)
13186 {
13187 unsigned Rd, Rm;
13188
13189 Rd = inst.operands[0].reg;
13190 Rm = inst.operands[1].reg;
13191
13192 reject_bad_reg (Rd);
13193 reject_bad_reg (Rm);
13194
13195 inst.instruction |= Rd << 8;
13196 inst.instruction |= Rm << 16;
13197 inst.instruction |= Rm;
13198 }
13199
13200 static void
13201 do_t_rev (void)
13202 {
13203 unsigned Rd, Rm;
13204
13205 Rd = inst.operands[0].reg;
13206 Rm = inst.operands[1].reg;
13207
13208 reject_bad_reg (Rd);
13209 reject_bad_reg (Rm);
13210
13211 if (Rd <= 7 && Rm <= 7
13212 && inst.size_req != 4)
13213 {
13214 inst.instruction = THUMB_OP16 (inst.instruction);
13215 inst.instruction |= Rd;
13216 inst.instruction |= Rm << 3;
13217 }
13218 else if (unified_syntax)
13219 {
13220 inst.instruction = THUMB_OP32 (inst.instruction);
13221 inst.instruction |= Rd << 8;
13222 inst.instruction |= Rm << 16;
13223 inst.instruction |= Rm;
13224 }
13225 else
13226 inst.error = BAD_HIREG;
13227 }
13228
13229 static void
13230 do_t_rrx (void)
13231 {
13232 unsigned Rd, Rm;
13233
13234 Rd = inst.operands[0].reg;
13235 Rm = inst.operands[1].reg;
13236
13237 reject_bad_reg (Rd);
13238 reject_bad_reg (Rm);
13239
13240 inst.instruction |= Rd << 8;
13241 inst.instruction |= Rm;
13242 }
13243
/* Encode Thumb RSB/RSBS (reverse subtract).  Two-operand syntax reuses
   Rd as the first source.  RSB{S} Rd, Rs, #0 with low registers is
   rewritten to the 16-bit NEGS encoding when predication and size
   requests allow.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Flag-setting templates have bit 20 set; the narrow NEGS always
	 sets flags, so it is only usable outside an IT block (or inside
	 one for the non-flag-setting mnemonic? -- the polarity below).  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      if (inst.relocs[0].exp.X_op != O_constant
	  || inst.relocs[0].exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.relocs[0].type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
13298
13299 static void
13300 do_t_setend (void)
13301 {
13302 if (warn_on_deprecated
13303 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
13304 as_tsktsk (_("setend use is deprecated for ARMv8"));
13305
13306 set_pred_insn_type (OUTSIDE_PRED_INSN);
13307 if (inst.operands[0].imm)
13308 inst.instruction |= 0x8;
13309 }
13310
/* Encode a Thumb shift instruction: ASR, LSL, LSR or ROR, with either a
   register or an immediate shift count.

   Under unified syntax a 16-bit encoding is chosen when all registers
   are low, the flag-setting behaviour matches the IT-block context and
   no 32-bit-only form is required; otherwise the 32-bit encoding is
   used.  Immediate-shift 32-bit forms are rewritten as MOV/MOVS with a
   shifted register operand.  Pre-UAL (divided) syntax only supports the
   16-bit encodings.  */

static void
do_t_shift (void)
{
  /* Two-operand form: the destination doubles as the first source.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* The 16-bit encodings set the flags outside an IT block and
	 leave them alone inside one; any mismatch forces the 32-bit
	 encoding.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      /* High registers are only available in the 32-bit encoding.  */
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit ROR-immediate encoding.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* The 16-bit register-shift form requires Rd == Rn and a low
	 shift-count register.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Immediate shifts have no dedicated 32-bit encoding;
		 emit MOV/MOVS with a shifted-register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.relocs[0].type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-UAL syntax: only the 16-bit encodings are available.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
13458
13459 static void
13460 do_t_simd (void)
13461 {
13462 unsigned Rd, Rn, Rm;
13463
13464 Rd = inst.operands[0].reg;
13465 Rn = inst.operands[1].reg;
13466 Rm = inst.operands[2].reg;
13467
13468 reject_bad_reg (Rd);
13469 reject_bad_reg (Rn);
13470 reject_bad_reg (Rm);
13471
13472 inst.instruction |= Rd << 8;
13473 inst.instruction |= Rn << 16;
13474 inst.instruction |= Rm;
13475 }
13476
/* Like do_t_simd, but with the source registers swapped: the second
   assembler operand is placed in the Rm field and the third in Rn.  */

static void
do_t_simd2 (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;	/* Note: operand 1 goes in Rm ...  */
  Rn = inst.operands[2].reg;	/* ... and operand 2 in Rn.  */

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
}
13494
13495 static void
13496 do_t_smc (void)
13497 {
13498 unsigned int value = inst.relocs[0].exp.X_add_number;
13499 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
13500 _("SMC is not permitted on this architecture"));
13501 constraint (inst.relocs[0].exp.X_op != O_constant,
13502 _("expression too complex"));
13503 inst.relocs[0].type = BFD_RELOC_UNUSED;
13504 inst.instruction |= (value & 0xf000) >> 12;
13505 inst.instruction |= (value & 0x0ff0);
13506 inst.instruction |= (value & 0x000f) << 16;
13507 /* PR gas/15623: SMC instructions must be last in an IT block. */
13508 set_pred_insn_type_last ();
13509 }
13510
13511 static void
13512 do_t_hvc (void)
13513 {
13514 unsigned int value = inst.relocs[0].exp.X_add_number;
13515
13516 inst.relocs[0].type = BFD_RELOC_UNUSED;
13517 inst.instruction |= (value & 0x0fff);
13518 inst.instruction |= (value & 0xf000) << 4;
13519 }
13520
/* Encode the common part of SSAT and USAT: Rd, #sat, Rn {, shift}.
   BIAS is subtracted from the saturation immediate before encoding
   (1 for SSAT, whose range starts at 1; 0 for USAT).  */

static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  /* Optional shift applied to Rn.  */
  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.relocs[0].exp.X_add_number;

      inst.relocs[0].type = BFD_RELOC_UNUSED;

      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  /* Only ASR needs marking; LSL leaves the sh bit clear.  */
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* The shift amount is split: bits 4:2 go to bits 14:12,
	     bits 1:0 go to bits 7:6.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
13558
/* Encode SSAT (signed saturate).  The saturation bound is encoded
   with a bias of one relative to USAT.  */

static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
13564
13565 static void
13566 do_t_ssat16 (void)
13567 {
13568 unsigned Rd, Rn;
13569
13570 Rd = inst.operands[0].reg;
13571 Rn = inst.operands[2].reg;
13572
13573 reject_bad_reg (Rd);
13574 reject_bad_reg (Rn);
13575
13576 inst.instruction |= Rd << 8;
13577 inst.instruction |= inst.operands[1].imm - 1;
13578 inst.instruction |= Rn << 16;
13579 }
13580
13581 static void
13582 do_t_strex (void)
13583 {
13584 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
13585 || inst.operands[2].postind || inst.operands[2].writeback
13586 || inst.operands[2].immisreg || inst.operands[2].shifted
13587 || inst.operands[2].negative,
13588 BAD_ADDR_MODE);
13589
13590 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
13591
13592 inst.instruction |= inst.operands[0].reg << 8;
13593 inst.instruction |= inst.operands[1].reg << 12;
13594 inst.instruction |= inst.operands[2].reg << 16;
13595 inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
13596 }
13597
13598 static void
13599 do_t_strexd (void)
13600 {
13601 if (!inst.operands[2].present)
13602 inst.operands[2].reg = inst.operands[1].reg + 1;
13603
13604 constraint (inst.operands[0].reg == inst.operands[1].reg
13605 || inst.operands[0].reg == inst.operands[2].reg
13606 || inst.operands[0].reg == inst.operands[3].reg,
13607 BAD_OVERLAP);
13608
13609 inst.instruction |= inst.operands[0].reg;
13610 inst.instruction |= inst.operands[1].reg << 12;
13611 inst.instruction |= inst.operands[2].reg << 8;
13612 inst.instruction |= inst.operands[3].reg << 16;
13613 }
13614
13615 static void
13616 do_t_sxtah (void)
13617 {
13618 unsigned Rd, Rn, Rm;
13619
13620 Rd = inst.operands[0].reg;
13621 Rn = inst.operands[1].reg;
13622 Rm = inst.operands[2].reg;
13623
13624 reject_bad_reg (Rd);
13625 reject_bad_reg (Rn);
13626 reject_bad_reg (Rm);
13627
13628 inst.instruction |= Rd << 8;
13629 inst.instruction |= Rn << 16;
13630 inst.instruction |= Rm;
13631 inst.instruction |= inst.operands[3].imm << 4;
13632 }
13633
/* Encode SXTH and related extend instructions: Rd, Rm {, ROR #imm}.
   A 16-bit encoding is used when both registers are low and no
   rotation is requested; otherwise the 32-bit encoding carries the
   rotation field in bits 5:4.  */

static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      /* Pre-UAL syntax only has the 16-bit low-register encoding,
	 which cannot express a rotation.  */
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
13669
/* Encode a Thumb SWI (SVC) instruction.  The immediate is handled
   entirely by the BFD_RELOC_ARM_SWI fixup.  */

static void
do_t_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
}
13675
/* Encode TBB/TBH (table branch byte/halfword): [Rn, Rm{, LSL #1}].  */

static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  /* Bit 4 of the opcode distinguishes TBH (set) from TBB (clear).  */
  half = (inst.instruction & 0x10) != 0;
  /* Table branches must be the last instruction in an IT block.  */
  set_pred_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* ARMv8 lifts the restriction on SP as the base register.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only TBH takes the LSL #1 index shift.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13698
13699 static void
13700 do_t_udf (void)
13701 {
13702 if (!inst.operands[0].present)
13703 inst.operands[0].imm = 0;
13704
13705 if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
13706 {
13707 constraint (inst.size_req == 2,
13708 _("immediate value out of range"));
13709 inst.instruction = THUMB_OP32 (inst.instruction);
13710 inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
13711 inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
13712 }
13713 else
13714 {
13715 inst.instruction = THUMB_OP16 (inst.instruction);
13716 inst.instruction |= inst.operands[0].imm;
13717 }
13718
13719 set_pred_insn_type (NEUTRAL_IT_INSN);
13720 }
13721
13722
/* Encode USAT (unsigned saturate).  Shares its layout with SSAT but
   the saturation bound is encoded without a bias.  */

static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13728
13729 static void
13730 do_t_usat16 (void)
13731 {
13732 unsigned Rd, Rn;
13733
13734 Rd = inst.operands[0].reg;
13735 Rn = inst.operands[2].reg;
13736
13737 reject_bad_reg (Rd);
13738 reject_bad_reg (Rn);
13739
13740 inst.instruction |= Rd << 8;
13741 inst.instruction |= inst.operands[1].imm;
13742 inst.instruction |= Rn << 16;
13743 }
13744
13745 /* Checking the range of the branch offset (VAL) with NBITS bits
13746 and IS_SIGNED signedness. Also checks the LSB to be 0. */
13747 static int
13748 v8_1_branch_value_check (int val, int nbits, int is_signed)
13749 {
13750 gas_assert (nbits > 0 && nbits <= 32);
13751 if (is_signed)
13752 {
13753 int cmp = (1 << (nbits - 1));
13754 if ((val < -cmp) || (val >= cmp) || (val & 0x01))
13755 return FAIL;
13756 }
13757 else
13758 {
13759 if ((val <= 0) || (val >= (1 << nbits)) || (val & 0x1))
13760 return FAIL;
13761 }
13762 return SUCCESS;
13763 }
13764
/* For branches in Armv8.1-M Mainline: encode the branch-future
   instructions BF, BFL, BFCSEL, BFX and BFLX.  Operand 0 is the
   branch-point label; whenever an operand's value is not yet known
   (hasreloc set), a relocation is emitted instead of filling the
   immediate fields directly.  */
static void
do_t_branch_future (void)
{
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);
  if (inst.operands[0].hasreloc == 0)
    {
      if (v8_1_branch_value_check (inst.operands[0].imm, 5, FALSE) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      /* Branch point: bits 4:1 of the immediate go to bits 26:23.  */
      inst.instruction |= ((inst.operands[0].imm & 0x1f) >> 1) << 23;
    }
  else
    {
      inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH5;
      inst.relocs[0].pc_rel = 1;
    }

  switch (insn)
    {
    case T_MNEM_bf:
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 17, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  /* Scatter the 17-bit offset into immA/immB/immC fields.  */
	  int immA = (val & 0x0001f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF17;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfl:
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 19, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  /* As for BF, but with a 19-bit offset range.  */
	  int immA = (val & 0x0007f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF19;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfcsel:
      /* Operand 1.  */
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  int immA = (val & 0x00001000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF13;
	  inst.relocs[1].pc_rel = 1;
	}

      /* Operand 2.  The else-branch target must be 2 or 4 bytes past
	 the branch point; a distance of 4 sets the T bit.  */
      if (inst.operands[2].hasreloc == 0)
	{
	  constraint ((inst.operands[0].hasreloc != 0), BAD_ARGS);
	  int val2 = inst.operands[2].imm;
	  int val0 = inst.operands[0].imm & 0x1f;
	  int diff = val2 - val0;
	  if (diff == 4)
	    inst.instruction |= 1 << 17;  /* T bit.  */
	  else if (diff != 2)
	    as_bad (_("out of range label-relative fixup value"));
	}
      else
	{
	  constraint ((inst.operands[0].hasreloc == 0), BAD_ARGS);
	  inst.relocs[2].type = BFD_RELOC_THUMB_PCREL_BFCSEL;
	  inst.relocs[2].pc_rel = 1;
	}

      /* Operand 3: the condition, encoded in bits 21:18.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= (inst.operands[3].imm & 0xf) << 18;
      break;

    case T_MNEM_bfx:
    case T_MNEM_bflx:
      /* Register forms: the branch target register goes in bits 19:16.  */
      inst.instruction |= inst.operands[1].reg << 16;
      break;

    default: abort ();
    }
}
13873
13874 /* Helper function for do_t_loloop to handle relocations. */
13875 static void
13876 v8_1_loop_reloc (int is_le)
13877 {
13878 if (inst.relocs[0].exp.X_op == O_constant)
13879 {
13880 int value = inst.relocs[0].exp.X_add_number;
13881 value = (is_le) ? -value : value;
13882
13883 if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
13884 as_bad (BAD_BRANCH_OFF);
13885
13886 int imml, immh;
13887
13888 immh = (value & 0x00000ffc) >> 2;
13889 imml = (value & 0x00000002) >> 1;
13890
13891 inst.instruction |= (imml << 11) | (immh << 1);
13892 }
13893 else
13894 {
13895 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_LOOP12;
13896 inst.relocs[0].pc_rel = 1;
13897 }
13898 }
13899
/* To handle the Scalar Low Overhead Loop instructions
   in Armv8.1-M Mainline: LE, WLS and DLS.  */
static void
do_t_loloop (void)
{
  unsigned long insn = inst.instruction;

  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction = THUMB_OP32 (inst.instruction);

  switch (insn)
    {
    case T_MNEM_le:
      /* le <label>: when no register operand is given, bit 21 is set.  */
      if (!inst.operands[0].present)
	inst.instruction |= 1 << 21;

      /* LE branches backwards; the helper negates the offset.  */
      v8_1_loop_reloc (TRUE);
      break;

    case T_MNEM_wls:
      v8_1_loop_reloc (FALSE);
      /* Fall through: WLS and DLS both encode the loop count
	 register in bits 19:16.  */
    case T_MNEM_dls:
      constraint (inst.operands[1].isreg != 1, BAD_ARGS);
      inst.instruction |= (inst.operands[1].reg << 16);
      break;

    default: abort();
    }
}
13931
/* MVE instruction encoder helpers.  */

/* Base opcode values for overloaded MVE mnemonics; the encoder
   functions OR in register numbers and size/type fields.  */
#define M_MNEM_vabav	0xee800f01
#define M_MNEM_vmladav	  0xeef00e00
#define M_MNEM_vmladava	  0xeef00e20
#define M_MNEM_vmladavx	  0xeef01e00
#define M_MNEM_vmladavax  0xeef01e20
#define M_MNEM_vmlsdav	  0xeef00e01
#define M_MNEM_vmlsdava	  0xeef00e21
#define M_MNEM_vmlsdavx	  0xeef01e01
#define M_MNEM_vmlsdavax  0xeef01e21
#define M_MNEM_vmullt	0xee011e00
#define M_MNEM_vmullb	0xee010e00
/* De-interleaving stores VST2x/VST4x.  */
#define M_MNEM_vst20	0xfc801e00
#define M_MNEM_vst21	0xfc801e20
#define M_MNEM_vst40	0xfc801e01
#define M_MNEM_vst41	0xfc801e21
#define M_MNEM_vst42	0xfc801e41
#define M_MNEM_vst43	0xfc801e61
/* Interleaving loads VLD2x/VLD4x.  */
#define M_MNEM_vld20	0xfc901e00
#define M_MNEM_vld21	0xfc901e20
#define M_MNEM_vld40	0xfc901e01
#define M_MNEM_vld41	0xfc901e21
#define M_MNEM_vld42	0xfc901e41
#define M_MNEM_vld43	0xfc901e61
/* Vector stores and loads, by element size.  */
#define M_MNEM_vstrb	0xec000e00
#define M_MNEM_vstrh	0xec000e10
#define M_MNEM_vstrw	0xec000e40
#define M_MNEM_vstrd	0xec000e50
#define M_MNEM_vldrb	0xec100e00
#define M_MNEM_vldrh	0xec100e10
#define M_MNEM_vldrw	0xec100e40
#define M_MNEM_vldrd	0xec100e50
13964
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of NEON_ENC_TAB: the three alternative base encodings of an
   overloaded mnemonic.  N_INV marks a variant that does not exist.  */
struct neon_tab_entry
{
  unsigned integer;		/* Integer (or untyped) variant.  */
  unsigned float_or_poly;	/* Float or polynomial variant.  */
  unsigned scalar_or_imm;	/* Scalar or immediate variant.  */
};
13978
/* Map overloaded Neon opcodes to their respective encodings.  Each
   X() row names a mnemonic and gives its integer, float-or-poly and
   scalar-or-immediate base encodings; N_INV marks variants that do
   not exist.  The table is expanded twice below, once to build the
   N_MNEM_* enumeration and once to build neon_enc_tab[].  */
#define NEON_ENC_TAB							\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vabdl,	0x0800700, N_INV,     N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vaddl,	0x0800000, N_INV,     N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vsubl,	0x0800200, N_INV,     N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)
14059
/* Symbolic opcode indices: one N_MNEM_<op> per NEON_ENC_TAB row.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encoding table itself, indexed by enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
14073
/* Accessors that look up one of the three encoding variants for an
   opcode index.  The low 28 bits select the table row; the top nibble
   of the original value is preserved by the SINGLE/DOUBLE/FPV8 forms.
   Do not use these macros; instead, use NEON_ENCODE defined below.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction with the encoding variant selected by TYPE
   (INTEGER, FLOAT, SCALAR, ...) and record that this instruction took
   part in Neon encoding (see check_neon_suffixes below).  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)
14098
/* Diagnose a Neon type suffix given on an instruction that never went
   through NEON_ENCODE (i.e. is_neon was never set).  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
14109
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.

   Each X() row is (operand count, operand kinds, classification) and
   is expanded several times below via redefinitions of X/S2/S3/S4.  */

#define NEON_SHAPE_DEF			\
  X(3, (R, Q, Q), QUAD),		\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(3, (Q, Q, R), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(4, (D, D, S, I), DOUBLE),		\
  X(4, (Q, Q, S, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE),			\
/* Half float shape supported so far. */\
  X (2, (H, D), MIXED),			\
  X (2, (D, H), MIXED),			\
  X (2, (H, F), MIXED),			\
  X (2, (F, H), MIXED),			\
  X (2, (H, H), HALF),			\
  X (2, (H, R), HALF),			\
  X (2, (R, H), HALF),			\
  X (2, (H, I), HALF),			\
  X (3, (H, H, H), HALF),		\
  X (3, (H, F, I), MIXED),		\
  X (3, (F, H, I), MIXED),		\
  X (3, (D, H, H), MIXED),		\
  X (3, (D, H, S), MIXED)

/* Paste the operand letters of a shape into a single NS_* token.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* First expansion: the NS_* shape enumeration.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
14203
/* Register-width classification of a whole shape.  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Classification of each shape, in NEON_SHAPE_DEF order.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
14221
/* Kinds of individual shape operands, matching the mnemonic letters
   used in NEON_SHAPE_DEF (H, F, D, Q, I, S, R, L).  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  16,	/* SE_H.  */
  32,	/* SE_F.  */
  64,	/* SE_D.  */
  128,	/* SE_Q.  */
  0,	/* SE_I: not a register.  */
  32,	/* SE_S.  */
  32,	/* SE_R.  */
  0	/* SE_L: not a single register.  */
};
14246
/* A decoded shape: how many operands it has and what kind each is.  */
struct neon_shape_info
{
  unsigned els;				/* Number of operands.  */
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];	/* Kind of each operand.  */
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Per-shape operand information, in NEON_SHAPE_DEF order; used by
   neon_select_shape to match parsed operands against each shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
14268
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  Note that the N_DBL..N_SIZ modifier values reuse the
   low bit positions and are only meaningful alongside N_EQK.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

/* All of the N_EQK modifier bits.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Commonly-used combinations of the type bits above.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)
#define N_I_MVE	   (N_I8 | N_I16 | N_I32)
#define N_F_MVE	   (N_F16 | N_F32)
#define N_SU_MVE   (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
14332
14333 /* Select a "shape" for the current instruction (describing register types or
14334 sizes) from a list of alternatives. Return NS_NULL if the current instruction
14335 doesn't fit. For non-polymorphic shapes, checking is usually done as a
14336 function of operand parsing, so this function doesn't need to be called.
14337 Shapes should be listed in order of decreasing length. */
14338
/* Try each candidate shape in the NS_NULL-terminated variable-argument list
   in turn against the parsed operands in INST, returning the first one whose
   element kinds (SE_H/SE_F/SE_D/SE_R/SE_Q/SE_I/SE_S/SE_L) all match and which
   consumes exactly the operands present.  Returns NS_NULL and reports an
   error if nothing matches.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      /* Half precision: an S register with a 16-bit type attached
		 either to the mnemonic or to this operand.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      /* Single precision: an S register, either untyped or with a
		 32-bit type.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      /* Double-width vector register (D register).  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      /* ARM core (general-purpose) register.  */
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      /* Quad-width vector register (Q register).  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      /* Immediate operand (neither register nor scalar).  */
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      /* Scalar operand (indexed vector element).  */
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register list: always acceptable here.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
14475
14476 /* True if SHAPE is predominantly a quadword operation (most of the time, this
14477 means the Q bit should be set). */
14478
14479 static int
14480 neon_quad (enum neon_shape shape)
14481 {
14482 return neon_shape_class[shape] == SC_QUAD;
14483 }
14484
/* Apply the modifier bits that accompany N_EQK to a key element type and
   size, rewriting *G_TYPE and/or *G_SIZE in place.  Does nothing unless
   TYPEBITS has N_EQK set.  */

static void
neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
		       unsigned *g_size)
{
  /* Allow modification to be made to types which are constrained to be
     based on the key element, based on bits set alongside N_EQK.  */
  if ((typebits & N_EQK) != 0)
    {
      /* First adjust the width: halve (N_HLF) or double (N_DBL).  */
      if ((typebits & N_HLF) != 0)
	*g_size /= 2;
      else if ((typebits & N_DBL) != 0)
	*g_size *= 2;
      /* Then optionally override the element class; at most one of these
	 bits is expected, tested in priority order.  */
      if ((typebits & N_SGN) != 0)
	*g_type = NT_signed;
      else if ((typebits & N_UNS) != 0)
	*g_type = NT_unsigned;
      else if ((typebits & N_INT) != 0)
	*g_type = NT_integer;
      else if ((typebits & N_FLT) != 0)
	*g_type = NT_float;
      else if ((typebits & N_SIZ) != 0)
	*g_type = NT_untyped;
    }
}
14509
14510 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
14511 operand type, i.e. the single type specified in a Neon instruction when it
14512 is the only one given. */
14513
14514 static struct neon_type_el
14515 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
14516 {
14517 struct neon_type_el dest = *key;
14518
14519 gas_assert ((thisarg & N_EQK) != 0);
14520
14521 neon_modify_type_size (thisarg, &dest.type, &dest.size);
14522
14523 return dest;
14524 }
14525
14526 /* Convert Neon type and size into compact bitmask representation. */
14527
14528 static enum neon_type_mask
14529 type_chk_of_el_type (enum neon_el_type type, unsigned size)
14530 {
14531 switch (type)
14532 {
14533 case NT_untyped:
14534 switch (size)
14535 {
14536 case 8: return N_8;
14537 case 16: return N_16;
14538 case 32: return N_32;
14539 case 64: return N_64;
14540 default: ;
14541 }
14542 break;
14543
14544 case NT_integer:
14545 switch (size)
14546 {
14547 case 8: return N_I8;
14548 case 16: return N_I16;
14549 case 32: return N_I32;
14550 case 64: return N_I64;
14551 default: ;
14552 }
14553 break;
14554
14555 case NT_float:
14556 switch (size)
14557 {
14558 case 16: return N_F16;
14559 case 32: return N_F32;
14560 case 64: return N_F64;
14561 default: ;
14562 }
14563 break;
14564
14565 case NT_poly:
14566 switch (size)
14567 {
14568 case 8: return N_P8;
14569 case 16: return N_P16;
14570 case 64: return N_P64;
14571 default: ;
14572 }
14573 break;
14574
14575 case NT_signed:
14576 switch (size)
14577 {
14578 case 8: return N_S8;
14579 case 16: return N_S16;
14580 case 32: return N_S32;
14581 case 64: return N_S64;
14582 default: ;
14583 }
14584 break;
14585
14586 case NT_unsigned:
14587 switch (size)
14588 {
14589 case 8: return N_U8;
14590 case 16: return N_U16;
14591 case 32: return N_U32;
14592 case 64: return N_U64;
14593 default: ;
14594 }
14595 break;
14596
14597 default: ;
14598 }
14599
14600 return N_UTYP;
14601 }
14602
14603 /* Convert compact Neon bitmask type representation to a type and size. Only
14604 handles the case where a single bit is set in the mask. */
14605
static int
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
		     enum neon_type_mask mask)
{
  /* N_EQK is a constraint marker, not a concrete type.  */
  if ((mask & N_EQK) != 0)
    return FAIL;

  /* Derive the element width from whichever size group the bit falls in.  */
  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
    *size = 8;
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
    *size = 16;
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
    *size = 32;
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
    *size = 64;
  else
    return FAIL;

  /* And the element class from the type group.  */
  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
    *type = NT_signed;
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
    *type = NT_unsigned;
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
    *type = NT_integer;
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
    *type = NT_untyped;
  else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
    *type = NT_poly;
  else if ((mask & (N_F_ALL)) != 0)
    *type = NT_float;
  else
    return FAIL;

  return SUCCESS;
}
14641
14642 /* Modify a bitmask of allowed types. This is only needed for type
14643 relaxation. */
14644
14645 static unsigned
14646 modify_types_allowed (unsigned allowed, unsigned mods)
14647 {
14648 unsigned size;
14649 enum neon_el_type type;
14650 unsigned destmask;
14651 int i;
14652
14653 destmask = 0;
14654
14655 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
14656 {
14657 if (el_type_of_type_chk (&type, &size,
14658 (enum neon_type_mask) (allowed & i)) == SUCCESS)
14659 {
14660 neon_modify_type_size (mods, &type, &size);
14661 destmask |= type_chk_of_el_type (type, size);
14662 }
14663 }
14664
14665 return destmask;
14666 }
14667
14668 /* Check type and return type classification.
14669 The manual states (paraphrase): If one datatype is given, it indicates the
14670 type given in:
14671 - the second operand, if there is one
14672 - the operand, if there is no second operand
14673 - the result, if there are no operands.
14674 This isn't quite good enough though, so we use a concept of a "key" datatype
14675 which is set on a per-instruction basis, which is the one which matters when
14676 only one data type is written.
14677 Note: this function has side-effects (e.g. filling in missing operands). All
14678 Neon instructions should call it before performing bit encoding. */
14679
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      /* N_IGNORE_TYPE means skip all checking for this instruction.  */
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      /* Remember which operand carries the N_KEY ("key") type.  */
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A type on the mnemonic and a type on an operand are mutually
     exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 establishes the key operand's type/size; pass 1 validates every
     operand against the key and against its allowed-type mask.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  /* On pass 1, N_EQK operands are checked against the (modified)
	     key type rather than their literal mask.  */
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (BAD_SIMD_TYPE);
		      return badtype;
		    }
		}
	      else
		{
		  /* N_EQK operand: must equal the key type after applying
		     the modifier bits.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
14877
14878 /* Neon-style VFP instruction forwarding. */
14879
14880 /* Thumb VFP instructions have 0xE in the condition field. */
14881
14882 static void
14883 do_vfp_cond_or_thumb (void)
14884 {
14885 inst.is_neon = 1;
14886
14887 if (thumb_mode)
14888 inst.instruction |= 0xe0000000;
14889 else
14890 inst.instruction |= inst.cond << 28;
14891 }
14892
14893 /* Look up and encode a simple mnemonic, for use as a helper function for the
14894 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14895 etc. It is assumed that operand parsing has already been done, and that the
14896 operands are in the form expected by the given opcode (this isn't necessarily
14897 the same as the form in which they were parsed, hence some massaging must
14898 take place before this function is called).
14899 Checks current arch version against that in the looked-up opcode. */
14900
static void
do_vfp_nsyn_opcode (const char *opname)
{
  const struct asm_opcode *opcode;

  /* OPNAME must name a real entry in the opcode table; a miss here is a
     programming error, not a user error, hence abort.  */
  opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);

  if (!opcode)
    abort ();

  /* Reject the instruction if the selected CPU/FPU lacks the feature bits
     of the looked-up opcode.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
		thumb_mode ? *opcode->tvariant : *opcode->avariant),
	      _(BAD_FPU));

  inst.is_neon = 1;

  if (thumb_mode)
    {
      inst.instruction = opcode->tvalue;
      opcode->tencode ();
    }
  else
    {
      /* ARM encodings carry the condition code in the top nibble.  */
      inst.instruction = (inst.cond << 28) | opcode->avalue;
      opcode->aencode ();
    }
}
14928
14929 static void
14930 do_vfp_nsyn_add_sub (enum neon_shape rs)
14931 {
14932 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14933
14934 if (rs == NS_FFF || rs == NS_HHH)
14935 {
14936 if (is_add)
14937 do_vfp_nsyn_opcode ("fadds");
14938 else
14939 do_vfp_nsyn_opcode ("fsubs");
14940
14941 /* ARMv8.2 fp16 instruction. */
14942 if (rs == NS_HHH)
14943 do_scalar_fp16_v82_encode ();
14944 }
14945 else
14946 {
14947 if (is_add)
14948 do_vfp_nsyn_opcode ("faddd");
14949 else
14950 do_vfp_nsyn_opcode ("fsubd");
14951 }
14952 }
14953
14954 /* Check operand types to see if this is a VFP instruction, and if so call
14955 PFN (). */
14956
static int
try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
{
  enum neon_shape rs;
  struct neon_type_el et;

  /* ARGS is the operand count: 2 for monadic forms, 3 for dyadic.  */
  switch (args)
    {
    case 2:
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
      break;

    case 3:
      rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
      et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
			    N_F_ALL | N_KEY | N_VFP);
      break;

    default:
      abort ();
    }

  if (et.type != NT_invtype)
    {
      /* The operands fit a floating-point VFP form: encode via PFN.  */
      pfn (rs);
      return SUCCESS;
    }

  /* Not a VFP form; clear the error so the Neon path can have a go.  */
  inst.error = NULL;
  return FAIL;
}
14989
14990 static void
14991 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14992 {
14993 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14994
14995 if (rs == NS_FFF || rs == NS_HHH)
14996 {
14997 if (is_mla)
14998 do_vfp_nsyn_opcode ("fmacs");
14999 else
15000 do_vfp_nsyn_opcode ("fnmacs");
15001
15002 /* ARMv8.2 fp16 instruction. */
15003 if (rs == NS_HHH)
15004 do_scalar_fp16_v82_encode ();
15005 }
15006 else
15007 {
15008 if (is_mla)
15009 do_vfp_nsyn_opcode ("fmacd");
15010 else
15011 do_vfp_nsyn_opcode ("fnmacd");
15012 }
15013 }
15014
15015 static void
15016 do_vfp_nsyn_fma_fms (enum neon_shape rs)
15017 {
15018 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
15019
15020 if (rs == NS_FFF || rs == NS_HHH)
15021 {
15022 if (is_fma)
15023 do_vfp_nsyn_opcode ("ffmas");
15024 else
15025 do_vfp_nsyn_opcode ("ffnmas");
15026
15027 /* ARMv8.2 fp16 instruction. */
15028 if (rs == NS_HHH)
15029 do_scalar_fp16_v82_encode ();
15030 }
15031 else
15032 {
15033 if (is_fma)
15034 do_vfp_nsyn_opcode ("ffmad");
15035 else
15036 do_vfp_nsyn_opcode ("ffnmad");
15037 }
15038 }
15039
15040 static void
15041 do_vfp_nsyn_mul (enum neon_shape rs)
15042 {
15043 if (rs == NS_FFF || rs == NS_HHH)
15044 {
15045 do_vfp_nsyn_opcode ("fmuls");
15046
15047 /* ARMv8.2 fp16 instruction. */
15048 if (rs == NS_HHH)
15049 do_scalar_fp16_v82_encode ();
15050 }
15051 else
15052 do_vfp_nsyn_opcode ("fmuld");
15053 }
15054
15055 static void
15056 do_vfp_nsyn_abs_neg (enum neon_shape rs)
15057 {
15058 int is_neg = (inst.instruction & 0x80) != 0;
15059 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
15060
15061 if (rs == NS_FF || rs == NS_HH)
15062 {
15063 if (is_neg)
15064 do_vfp_nsyn_opcode ("fnegs");
15065 else
15066 do_vfp_nsyn_opcode ("fabss");
15067
15068 /* ARMv8.2 fp16 instruction. */
15069 if (rs == NS_HH)
15070 do_scalar_fp16_v82_encode ();
15071 }
15072 else
15073 {
15074 if (is_neg)
15075 do_vfp_nsyn_opcode ("fnegd");
15076 else
15077 do_vfp_nsyn_opcode ("fabsd");
15078 }
15079 }
15080
15081 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
15082 insns belong to Neon, and are handled elsewhere. */
15083
15084 static void
15085 do_vfp_nsyn_ldm_stm (int is_dbmode)
15086 {
15087 int is_ldm = (inst.instruction & (1 << 20)) != 0;
15088 if (is_ldm)
15089 {
15090 if (is_dbmode)
15091 do_vfp_nsyn_opcode ("fldmdbs");
15092 else
15093 do_vfp_nsyn_opcode ("fldmias");
15094 }
15095 else
15096 {
15097 if (is_dbmode)
15098 do_vfp_nsyn_opcode ("fstmdbs");
15099 else
15100 do_vfp_nsyn_opcode ("fstmias");
15101 }
15102 }
15103
15104 static void
15105 do_vfp_nsyn_sqrt (void)
15106 {
15107 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
15108 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
15109
15110 if (rs == NS_FF || rs == NS_HH)
15111 {
15112 do_vfp_nsyn_opcode ("fsqrts");
15113
15114 /* ARMv8.2 fp16 instruction. */
15115 if (rs == NS_HH)
15116 do_scalar_fp16_v82_encode ();
15117 }
15118 else
15119 do_vfp_nsyn_opcode ("fsqrtd");
15120 }
15121
15122 static void
15123 do_vfp_nsyn_div (void)
15124 {
15125 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15126 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15127 N_F_ALL | N_KEY | N_VFP);
15128
15129 if (rs == NS_FFF || rs == NS_HHH)
15130 {
15131 do_vfp_nsyn_opcode ("fdivs");
15132
15133 /* ARMv8.2 fp16 instruction. */
15134 if (rs == NS_HHH)
15135 do_scalar_fp16_v82_encode ();
15136 }
15137 else
15138 do_vfp_nsyn_opcode ("fdivd");
15139 }
15140
15141 static void
15142 do_vfp_nsyn_nmul (void)
15143 {
15144 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15145 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15146 N_F_ALL | N_KEY | N_VFP);
15147
15148 if (rs == NS_FFF || rs == NS_HHH)
15149 {
15150 NEON_ENCODE (SINGLE, inst);
15151 do_vfp_sp_dyadic ();
15152
15153 /* ARMv8.2 fp16 instruction. */
15154 if (rs == NS_HHH)
15155 do_scalar_fp16_v82_encode ();
15156 }
15157 else
15158 {
15159 NEON_ENCODE (DOUBLE, inst);
15160 do_vfp_dp_rd_rn_rm ();
15161 }
15162 do_vfp_cond_or_thumb ();
15163
15164 }
15165
static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (inst.operands[1].isreg)
    {
      /* Register-register compare: VCMP{E} Sd/Dd, Sm/Dm.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare against #0: rewrite the mnemonic index to the "z" form
	 before encoding.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
15220
15221 static void
15222 nsyn_insert_sp (void)
15223 {
15224 inst.operands[1] = inst.operands[0];
15225 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
15226 inst.operands[0].reg = REG_SP;
15227 inst.operands[0].isreg = 1;
15228 inst.operands[0].writeback = 1;
15229 inst.operands[0].present = 1;
15230 }
15231
15232 static void
15233 do_vfp_nsyn_push (void)
15234 {
15235 nsyn_insert_sp ();
15236
15237 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
15238 _("register list must contain at least 1 and at most 16 "
15239 "registers"));
15240
15241 if (inst.operands[1].issingle)
15242 do_vfp_nsyn_opcode ("fstmdbs");
15243 else
15244 do_vfp_nsyn_opcode ("fstmdbd");
15245 }
15246
15247 static void
15248 do_vfp_nsyn_pop (void)
15249 {
15250 nsyn_insert_sp ();
15251
15252 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
15253 _("register list must contain at least 1 and at most 16 "
15254 "registers"));
15255
15256 if (inst.operands[1].issingle)
15257 do_vfp_nsyn_opcode ("fldmias");
15258 else
15259 do_vfp_nsyn_opcode ("fldmiad");
15260 }
15261
15262 /* Fix up Neon data-processing instructions, ORing in the correct bits for
15263 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
15264
15265 static void
15266 neon_dp_fixup (struct arm_it* insn)
15267 {
15268 unsigned int i = insn->instruction;
15269 insn->is_neon = 1;
15270
15271 if (thumb_mode)
15272 {
15273 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
15274 if (i & (1 << 24))
15275 i |= 1 << 28;
15276
15277 i &= ~(1 << 24);
15278
15279 i |= 0xef000000;
15280 }
15281 else
15282 i |= 0xf2000000;
15283
15284 insn->instruction = i;
15285 }
15286
15287 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
15288 (0, 1, 2, 3). */
15289
static unsigned
neon_logbits (unsigned x)
{
  /* For the power-of-two sizes used here (8, 16, 32, 64), ffs returns the
     1-based position of the set bit, so subtracting 4 maps 8->0, 16->1,
     32->2 and 64->3.  */
  unsigned lowbit = (unsigned) ffs ((int) x);
  return lowbit - 4;
}
15295
15296 #define LOW4(R) ((R) & 0xf)
15297 #define HI1(R) (((R) >> 4) & 1)
15298
/* Encode an MVE vector-by-scalar (Qd, Qn, Rm) instruction, rewriting the
   generic Neon opcode value already in inst.instruction to the MVE scalar
   form.  SIZE is the element size in bits; FP selects the floating-point
   variants.  */

static void
mve_encode_qqr (int size, int fp)
{
  /* SP/PC as the scalar operand are only warned about, not rejected.  */
  if (inst.operands[2].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[2].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  if (fp)
    {
      /* vadd.  */
      if (((unsigned)inst.instruction) == 0xd00)
	inst.instruction = 0xee300f40;
      /* vsub.  */
      else if (((unsigned)inst.instruction) == 0x200d00)
	inst.instruction = 0xee301f40;

      /* Setting size which is 1 for F16 and 0 for F32.  */
      inst.instruction |= (size == 16) << 28;
    }
  else
    {
      /* vadd.  */
      if (((unsigned)inst.instruction) == 0x800)
	inst.instruction = 0xee010f40;
      /* vsub.  */
      else if (((unsigned)inst.instruction) == 0x1000800)
	inst.instruction = 0xee011f40;
      /* Setting bits for size.  */
      inst.instruction |= neon_logbits (size) << 20;
    }
  /* Qd (D:Rd), Qn (N:Rn), and the scalar GPR Rm in the low nibble.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= inst.operands[2].reg;
  inst.is_neon = 1;
}
15337
/* Encode an MVE instruction taking a GPR destination and two vector sources
   (Rd, Qn, Qm).  BIT28 goes into bit 28; SIZE is the element size in
   bits.  */

static void
mve_encode_rqq (unsigned bit28, unsigned size)
{
  inst.instruction |= bit28 << 28;
  inst.instruction |= neon_logbits (size) << 20;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  /* Operand 0 is a general-purpose register: encoded whole at bits 12-15,
     no D bit.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.is_neon = 1;
}
15350
/* Encode an MVE three-vector-register instruction (Qd, Qn, Qm).  UBIT goes
   into bit 28; SIZE is the element size in bits.  */

static void
mve_encode_qqq (int ubit, int size)
{

  inst.instruction |= (ubit != 0) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_logbits (size) << 20;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[2].reg);

  inst.is_neon = 1;
}
15366
15367
15368 /* Encode insns with bit pattern:
15369
15370 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15371 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
15372
15373 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
15374 different meaning for some instruction. */
15375
static void
neon_three_same (int isquad, int ubit, int size)
{
  /* Rd (D:Rd), Rn (N:Rn) and Rm (M:Rm) register fields.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  /* Q bit selects quadword operation; U bit per the diagram above.  */
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
15392
15393 /* Encode instructions of the form:
15394
15395 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
15396 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
15397
15398 Don't write size if SIZE == -1. */
15399
static void
neon_two_same (int qbit, int ubit, int size)
{
  /* Destination register (D:Rd).  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Source register (M:Rm).  */
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  /* Two-register forms carry the size at bits 18-19 (cf. bits 20-21 for
     three-register forms).  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  neon_dp_fixup (&inst);
}
15415
15416 /* Neon instruction encoders, in approximate order of appearance. */
15417
15418 static void
15419 do_neon_dyadic_i_su (void)
15420 {
15421 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15422 struct neon_type_el et = neon_check_type (3, rs,
15423 N_EQK, N_EQK, N_SU_32 | N_KEY);
15424 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15425 }
15426
15427 static void
15428 do_neon_dyadic_i64_su (void)
15429 {
15430 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15431 struct neon_type_el et = neon_check_type (3, rs,
15432 N_EQK, N_EQK, N_SU_ALL | N_KEY);
15433 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15434 }
15435
/* Encode an immediate-shift instruction: IMMBITS is the raw immediate field,
   ET the (already checked) element type.  The U bit is written only when
   WRITE_UBIT is set.  */

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  unsigned size = et.size >> 3;	/* Element size in bytes: 1, 2, 4 or 8.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  /* (size >> 3) is 1 only for 64-bit elements (L bit at bit 7); the
     remaining size bits land at bits 19-21 above the immediate.  */
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
15454
static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      /* Immediate-shift form: VSHL.<type> Dd, Dm, #imm.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      int imm = inst.operands[2].imm;

      /* A left shift must be strictly smaller than the element width.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      /* Register-shift form.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
15490
static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      /* Immediate-shift form: VQSHL.<type> Dd, Dm, #imm.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      /* Unlike VSHL, the U bit is significant here.  */
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      /* Register-shift form.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
15520
15521 static void
15522 do_neon_rshl (void)
15523 {
15524 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15525 struct neon_type_el et = neon_check_type (3, rs,
15526 N_EQK, N_EQK, N_SU_ALL | N_KEY);
15527 unsigned int tmp;
15528
15529 tmp = inst.operands[2].reg;
15530 inst.operands[2].reg = inst.operands[1].reg;
15531 inst.operands[1].reg = tmp;
15532 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15533 }
15534
15535 static int
15536 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
15537 {
15538 /* Handle .I8 pseudo-instructions. */
15539 if (size == 8)
15540 {
15541 /* Unfortunately, this will make everything apart from zero out-of-range.
15542 FIXME is this the intended semantics? There doesn't seem much point in
15543 accepting .I8 if so. */
15544 immediate |= immediate << 8;
15545 size = 16;
15546 }
15547
15548 if (size >= 32)
15549 {
15550 if (immediate == (immediate & 0x000000ff))
15551 {
15552 *immbits = immediate;
15553 return 0x1;
15554 }
15555 else if (immediate == (immediate & 0x0000ff00))
15556 {
15557 *immbits = immediate >> 8;
15558 return 0x3;
15559 }
15560 else if (immediate == (immediate & 0x00ff0000))
15561 {
15562 *immbits = immediate >> 16;
15563 return 0x5;
15564 }
15565 else if (immediate == (immediate & 0xff000000))
15566 {
15567 *immbits = immediate >> 24;
15568 return 0x7;
15569 }
15570 if ((immediate & 0xffff) != (immediate >> 16))
15571 goto bad_immediate;
15572 immediate &= 0xffff;
15573 }
15574
15575 if (immediate == (immediate & 0x000000ff))
15576 {
15577 *immbits = immediate;
15578 return 0x9;
15579 }
15580 else if (immediate == (immediate & 0x0000ff00))
15581 {
15582 *immbits = immediate >> 8;
15583 return 0xb;
15584 }
15585
15586 bad_immediate:
15587 first_error (_("immediate value out of range"));
15588 return FAIL;
15589 }
15590
/* Encode the Neon bitwise-logic family (VAND/VBIC/VORR/VORN/VEOR ...):
   either three registers, or a register plus a modified immediate, with
   VAND/VORN immediates being pseudo-ops of VBIC/VORR.  */
static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      /* The operation is purely bitwise, so any element type goes.  */
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form: either "Vd, Vn, #imm" (three-operand) or
	 "Vd, #imm" (two-operand, destination doubles as source).  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      /* The low 28 bits hold the mnemonic's N_MNEM_* code at this point.  */
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      /* The three-operand immediate form is only valid when it is
	 equivalent to the two-operand form.  */
      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      /* neon_cmode_for_logic_imm already reported the error.  */
      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
15676
15677 static void
15678 do_neon_bitfield (void)
15679 {
15680 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15681 neon_check_type (3, rs, N_IGNORE_TYPE);
15682 neon_three_same (neon_quad (rs), 0, -1);
15683 }
15684
15685 static void
15686 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
15687 unsigned destbits)
15688 {
15689 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
15690 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
15691 types | N_KEY);
15692 if (et.type == NT_float)
15693 {
15694 NEON_ENCODE (FLOAT, inst);
15695 if (rs == NS_QQR)
15696 mve_encode_qqr (et.size, 1);
15697 else
15698 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
15699 }
15700 else
15701 {
15702 NEON_ENCODE (INTEGER, inst);
15703 if (rs == NS_QQR)
15704 mve_encode_qqr (et.size, 0);
15705 else
15706 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
15707 }
15708 }
15709
15710
15711 static void
15712 do_neon_dyadic_if_su_d (void)
15713 {
15714 /* This version only allow D registers, but that constraint is enforced during
15715 operand parsing so we don't need to do anything extra here. */
15716 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
15717 }
15718
15719 static void
15720 do_neon_dyadic_if_i_d (void)
15721 {
15722 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15723 affected if we specify unsigned args. */
15724 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
15725 }
15726
/* Checks that vfp_or_neon_is_neon may be asked to perform; combine with
   bitwise OR.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Validate/fix up the condition field.  */
  NEON_CHECK_ARCH = 2,	/* Require the Neon v1 extension.  */
  NEON_CHECK_ARCH8 = 4	/* Require the ARMv8 Neon extension.  */
};
15733
15734 /* Call this function if an instruction which may have belonged to the VFP or
15735 Neon instruction sets, but turned out to be a Neon instruction (due to the
15736 operand types involved, etc.). We have to check and/or fix-up a couple of
15737 things:
15738
15739 - Make sure the user hasn't attempted to make a Neon instruction
15740 conditional.
15741 - Alter the value in the condition code field if necessary.
15742 - Make sure that the arch supports Neon instructions.
15743
15744 Which of these operations take place depends on bits from enum
15745 vfp_or_neon_is_neon_bits.
15746
15747 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
15748 current instruction's condition is COND_ALWAYS, the condition field is
15749 changed to inst.uncond_value. This is necessary because instructions shared
15750 between VFP and Neon may be conditional for the VFP variants only, and the
15751 unconditional Neon version must have, e.g., 0xF in the condition field. */
15752
15753 static int
15754 vfp_or_neon_is_neon (unsigned check)
15755 {
15756 /* Conditions are always legal in Thumb mode (IT blocks). */
15757 if (!thumb_mode && (check & NEON_CHECK_CC))
15758 {
15759 if (inst.cond != COND_ALWAYS)
15760 {
15761 first_error (_(BAD_COND));
15762 return FAIL;
15763 }
15764 if (inst.uncond_value != -1)
15765 inst.instruction |= inst.uncond_value << 28;
15766 }
15767
15768
15769 if (((check & NEON_CHECK_ARCH) && !mark_feature_used (&fpu_neon_ext_v1))
15770 || ((check & NEON_CHECK_ARCH8)
15771 && !mark_feature_used (&fpu_neon_ext_armv8)))
15772 {
15773 first_error (_(BAD_FPU));
15774 return FAIL;
15775 }
15776
15777 return SUCCESS;
15778 }
15779
15780 static int
15781 check_simd_pred_availability (int fp, unsigned check)
15782 {
15783 if (inst.cond > COND_ALWAYS)
15784 {
15785 if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
15786 {
15787 inst.error = BAD_FPU;
15788 return 1;
15789 }
15790 inst.pred_insn_type = INSIDE_VPT_INSN;
15791 }
15792 else if (inst.cond < COND_ALWAYS)
15793 {
15794 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
15795 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
15796 else if (vfp_or_neon_is_neon (check) == FAIL)
15797 return 2;
15798 }
15799 else
15800 {
15801 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fp ? mve_fp_ext : mve_ext)
15802 && vfp_or_neon_is_neon (check) == FAIL)
15803 return 3;
15804
15805 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
15806 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
15807 }
15808 return 0;
15809 }
15810
/* Encode MVE VSTR/VLDR with a [Qn, #imm]{!} (vector base plus immediate)
   addressing mode.  SIZE is the access size in bits, ELSIZE the element
   size from the type suffix, LOAD non-zero for VLDR.  */
static void
do_mve_vstr_vldr_QI (int size, int elsize, int load)
{
  constraint (size < 32, BAD_ADDR_MODE);
  constraint (size != elsize, BAD_EL_TYPE);
  constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
  constraint (!inst.operands[1].preind, BAD_ADDR_MODE);
  constraint (load && inst.operands[0].reg == inst.operands[1].reg,
	      _("destination register and offset register may not be the"
		" same"));

  /* Split the immediate into magnitude and add/subtract direction.  */
  int imm = inst.relocs[0].exp.X_add_number;
  int add = 1;
  if (imm < 0)
    {
      add = 0;
      imm = -imm;
    }
  /* 7-bit offset field, scaled by the access size.  */
  constraint ((imm % (size / 8) != 0)
	      || imm > (0x7f << neon_logbits (size)),
	      (size == 32) ? _("immediate must be a multiple of 4 in the"
			       " range of +/-[0,508]")
			   : _("immediate must be a multiple of 8 in the"
			       " range of +/-[0,1016]"));
  inst.instruction |= 0x11 << 24;
  inst.instruction |= add << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= 1 << 12;
  inst.instruction |= (size == 64) << 8;
  /* Clear the low byte (including any bits set from the opcode template)
     before inserting the base-register high bit and the scaled offset.  */
  inst.instruction &= 0xffffff00;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= imm >> neon_logbits (size);
}
15847
/* Encode MVE VSTR/VLDR with a [Rn, Qm{, UXTW #os}] (scalar base plus
   vector offset) addressing mode.  SIZE is the memory access size in
   bits, ELSIZE the element size from the type suffix, LOAD non-zero for
   VLDR.  */
static void
do_mve_vstr_vldr_RQ (int size, int elsize, int load)
{
  /* The optional UXTW shift amount is carried in bit 5 of the
     offset-register operand.  */
  unsigned os = inst.operands[1].imm >> 5;
  constraint (os != 0 && size == 8,
	      _("can not shift offsets when accessing less than half-word"));
  constraint (os && os != neon_logbits (size),
	      _("shift immediate must be 1, 2 or 3 for half-word, word"
		" or double-word accesses respectively"));
  if (inst.operands[1].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  /* Check the element type is compatible with the access size.  */
  switch (size)
    {
    case 8:
      constraint (elsize >= 64, BAD_EL_TYPE);
      break;
    case 16:
      constraint (elsize < 16 || elsize >= 64, BAD_EL_TYPE);
      break;
    case 32:
    case 64:
      constraint (elsize != size, BAD_EL_TYPE);
      break;
    default:
      break;
    }
  constraint (inst.operands[1].writeback || !inst.operands[1].preind,
	      BAD_ADDR_MODE);
  if (load)
    {
      constraint (inst.operands[0].reg == (inst.operands[1].imm & 0x1f),
		  _("destination register and offset register may not be"
		    " the same"));
      /* A same-size load must be unsigned; a widening load must state a
	 signedness.  */
      constraint (size == elsize && inst.vectype.el[0].type != NT_unsigned,
		  BAD_EL_TYPE);
      constraint (inst.vectype.el[0].type != NT_unsigned
		  && inst.vectype.el[0].type != NT_signed, BAD_EL_TYPE);
      /* U bit (28): zero- vs sign-extension of loaded elements.  */
      inst.instruction |= (inst.vectype.el[0].type == NT_unsigned) << 28;
    }
  else
    {
      constraint (inst.vectype.el[0].type != NT_untyped, BAD_EL_TYPE);
    }

  inst.instruction |= 1 << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= neon_logbits (elsize) << 7;
  inst.instruction |= HI1 (inst.operands[1].imm) << 5;
  inst.instruction |= LOW4 (inst.operands[1].imm);
  /* Bit 0 flags a shifted (UXTW) offset.  */
  inst.instruction |= !!os;
}
15902
/* Encode MVE VSTR/VLDR with a [Rn, #imm]{!} or [Rn], #imm (scalar base
   plus immediate) addressing mode.  SIZE is the access size in bits,
   ELSIZE the element size from the type suffix, LOAD non-zero for
   VLDR.  */
static void
do_mve_vstr_vldr_RI (int size, int elsize, int load)
{
  enum neon_el_type type = inst.vectype.el[0].type;

  constraint (size >= 64, BAD_ADDR_MODE);
  /* Check the element type is compatible with the access size.  */
  switch (size)
    {
    case 16:
      constraint (elsize < 16 || elsize >= 64, BAD_EL_TYPE);
      break;
    case 32:
      constraint (elsize != size, BAD_EL_TYPE);
      break;
    default:
      break;
    }
  if (load)
    {
      /* A widening load (elsize != size) must state a signedness.  */
      constraint (elsize != size && type != NT_unsigned
		  && type != NT_signed, BAD_EL_TYPE);
    }
  else
    {
      /* A narrowing store (elsize != size) takes an untyped suffix.  */
      constraint (elsize != size && type != NT_untyped, BAD_EL_TYPE);
    }

  /* Split the immediate into magnitude and add/subtract direction.  */
  int imm = inst.relocs[0].exp.X_add_number;
  int add = 1;
  if (imm < 0)
    {
      add = 0;
      imm = -imm;
    }

  /* 7-bit offset field, scaled by the access size.  */
  if ((imm % (size / 8) != 0) || imm > (0x7f << neon_logbits (size)))
    {
      switch (size)
	{
	case 8:
	  constraint (1, _("immediate must be in the range of +/-[0,127]"));
	  break;
	case 16:
	  constraint (1, _("immediate must be a multiple of 2 in the"
			   " range of +/-[0,254]"));
	  break;
	case 32:
	  constraint (1, _("immediate must be a multiple of 4 in the"
			   " range of +/-[0,508]"));
	  break;
	}
    }

  if (size != elsize)
    {
      /* Widening/narrowing form: base must be a low register and the
	 vector register in the bottom half of the register file.  */
      constraint (inst.operands[1].reg > 7, BAD_HIREG);
      constraint (inst.operands[0].reg > 14,
		  _("MVE vector register in the range [Q0..Q7] expected"));
      inst.instruction |= (load && type == NT_unsigned) << 28;
      inst.instruction |= (size == 16) << 19;
      inst.instruction |= neon_logbits (elsize) << 7;
    }
  else
    {
      /* Same-size form: PC (and SP with writeback) bases only warn.  */
      if (inst.operands[1].reg == REG_PC)
	as_tsktsk (MVE_BAD_PC);
      else if (inst.operands[1].reg == REG_SP && inst.operands[1].writeback)
	as_tsktsk (MVE_BAD_SP);
      inst.instruction |= 1 << 12;
      inst.instruction |= neon_logbits (size) << 7;
    }
  inst.instruction |= inst.operands[1].preind << 24;
  inst.instruction |= add << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* Clear the offset field before inserting the scaled immediate.  */
  inst.instruction &= 0xffffff80;
  inst.instruction |= imm >> neon_logbits (size);

}
15984
15985 static void
15986 do_mve_vstr_vldr (void)
15987 {
15988 unsigned size;
15989 int load = 0;
15990
15991 if (inst.cond > COND_ALWAYS)
15992 inst.pred_insn_type = INSIDE_VPT_INSN;
15993 else
15994 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
15995
15996 switch (inst.instruction)
15997 {
15998 default:
15999 gas_assert (0);
16000 break;
16001 case M_MNEM_vldrb:
16002 load = 1;
16003 /* fall through. */
16004 case M_MNEM_vstrb:
16005 size = 8;
16006 break;
16007 case M_MNEM_vldrh:
16008 load = 1;
16009 /* fall through. */
16010 case M_MNEM_vstrh:
16011 size = 16;
16012 break;
16013 case M_MNEM_vldrw:
16014 load = 1;
16015 /* fall through. */
16016 case M_MNEM_vstrw:
16017 size = 32;
16018 break;
16019 case M_MNEM_vldrd:
16020 load = 1;
16021 /* fall through. */
16022 case M_MNEM_vstrd:
16023 size = 64;
16024 break;
16025 }
16026 unsigned elsize = inst.vectype.el[0].size;
16027
16028 if (inst.operands[1].isquad)
16029 {
16030 /* We are dealing with [Q, imm]{!} cases. */
16031 do_mve_vstr_vldr_QI (size, elsize, load);
16032 }
16033 else
16034 {
16035 if (inst.operands[1].immisreg == 2)
16036 {
16037 /* We are dealing with [R, Q, {UXTW #os}] cases. */
16038 do_mve_vstr_vldr_RQ (size, elsize, load);
16039 }
16040 else if (!inst.operands[1].immisreg)
16041 {
16042 /* We are dealing with [R, Imm]{!}/[R], Imm cases. */
16043 do_mve_vstr_vldr_RI (size, elsize, load);
16044 }
16045 else
16046 constraint (1, BAD_ADDR_MODE);
16047 }
16048
16049 inst.is_neon = 1;
16050 }
16051
/* Shared encoder for MVE structure store/load instructions, which accept
   only a plain [Rn]{!} base with no offset.  */
static void
do_mve_vst_vld (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  /* Only [Rn] or [Rn]! is valid: no offset, no symbol, no register.  */
  constraint (!inst.operands[1].preind || inst.relocs[0].exp.X_add_symbol != 0
	      || inst.relocs[0].exp.X_add_number != 0
	      || inst.operands[1].immisreg != 0,
	      BAD_ADDR_MODE);
  constraint (inst.vectype.el[0].size > 32, BAD_EL_TYPE);
  if (inst.operands[1].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);
  else if (inst.operands[1].reg == REG_SP && inst.operands[1].writeback)
    as_tsktsk (MVE_BAD_SP);


  /* These instructions are one of the "exceptions" mentioned in
     handle_pred_state.  They are MVE instructions that are not VPT compatible
     and do not accept a VPT code, thus appending such a code is a syntax
     error.  */
  if (inst.cond > COND_ALWAYS)
    first_error (BAD_SYNTAX);
  /* If we append a scalar condition code we can set this to
     MVE_OUTSIDE_PRED_INSN as it will also lead to a syntax error.  */
  else if (inst.cond < COND_ALWAYS)
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
  else
    inst.pred_insn_type = MVE_UNPREDICABLE_INSN;

  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= neon_logbits (inst.vectype.el[0].size) << 7;
  inst.is_neon = 1;
}
16089
16090 static void
16091 do_neon_dyadic_if_su (void)
16092 {
16093 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
16094 struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
16095 N_SUF_32 | N_KEY);
16096
16097 if (check_simd_pred_availability (et.type == NT_float,
16098 NEON_CHECK_ARCH | NEON_CHECK_CC))
16099 return;
16100
16101 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
16102 }
16103
/* Encode VADD/VSUB, shared between VFP, Neon and MVE.  */
static void
do_neon_addsub_if_i (void)
{
  /* Try the VFP scalar forms first.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
      && try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK,
					    N_EQK, N_IF_32 | N_I64 | N_KEY);

  /* The vector-with-scalar (QQR) form has no 64-bit variant.  */
  constraint (rs == NS_QQR && et.size == 64, BAD_FPU);
  /* If we are parsing Q registers and the element types match MVE, which NEON
     also supports, then we must check whether this is an instruction that can
     be used by both MVE/NEON.  This distinction can be made based on whether
     they are predicated or not.  */
  if ((rs == NS_QQQ || rs == NS_QQR) && et.size != 64)
    {
      if (check_simd_pred_availability (et.type == NT_float,
					NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
    }
  else
    {
      /* If they are either in a D register or are using an unsupported.  */
      if (rs != NS_QQR
	  && vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
    }

  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
16138
16139 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
16140 result to be:
16141 V<op> A,B (A is operand 0, B is operand 2)
16142 to mean:
16143 V<op> A,B,A
16144 not:
16145 V<op> A,B,B
16146 so handle that case specially. */
16147
16148 static void
16149 neon_exchange_operands (void)
16150 {
16151 if (inst.operands[1].present)
16152 {
16153 void *scratch = xmalloc (sizeof (inst.operands[0]));
16154
16155 /* Swap operands[1] and operands[2]. */
16156 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
16157 inst.operands[1] = inst.operands[2];
16158 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
16159 free (scratch);
16160 }
16161 else
16162 {
16163 inst.operands[1] = inst.operands[2];
16164 inst.operands[2] = inst.operands[0];
16165 }
16166 }
16167
/* Encode a Neon compare instruction.  REGTYPES are the types permitted
   for the register-register form, IMMTYPES those for the immediate form.
   If INVERT, exchange the source operands first so an inverted condition
   can be encoded with the non-inverted opcode.  */
static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Immediate form (presumably compare against #0; the immediate's
	 value is validated elsewhere — confirm against callers).  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
						N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Bit 10 distinguishes the floating-point variant.  */
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
16195
16196 static void
16197 do_neon_cmp (void)
16198 {
16199 neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
16200 }
16201
16202 static void
16203 do_neon_cmp_inv (void)
16204 {
16205 neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
16206 }
16207
16208 static void
16209 do_neon_ceq (void)
16210 {
16211 neon_compare (N_IF_32, N_IF_32, FALSE);
16212 }
16213
16214 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
16215 scalars, which are encoded in 5 bits, M : Rm.
16216 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
16217 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
16218 index in M.
16219
16220 Dot Product instructions are similar to multiply instructions except elsize
16221 should always be 32.
16222
16223 This function translates SCALAR, which is GAS's internal encoding of indexed
16224 scalar register, to raw encoding. There is also register and index range
16225 check based on ELSIZE. */
16226
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
16252
16253 /* Encode multiply / multiply-accumulate scalar instructions. */
16254
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  /* ET is the checked element type; UBIT supplies the value for the U
     bit (24).  Operand 2 is the indexed scalar, translated to its raw
     M:Rm encoding by neon_scalar_for_mul.  */
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  /* Bit 8 distinguishes the floating-point variant.  */
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
16277
16278 static void
16279 do_neon_mac_maybe_scalar (void)
16280 {
16281 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
16282 return;
16283
16284 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
16285 return;
16286
16287 if (inst.operands[2].isscalar)
16288 {
16289 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
16290 struct neon_type_el et = neon_check_type (3, rs,
16291 N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
16292 NEON_ENCODE (SCALAR, inst);
16293 neon_mul_mac (et, neon_quad (rs));
16294 }
16295 else
16296 {
16297 /* The "untyped" case can't happen. Do this to stop the "U" bit being
16298 affected if we specify unsigned args. */
16299 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
16300 }
16301 }
16302
16303 static void
16304 do_neon_fmac (void)
16305 {
16306 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
16307 return;
16308
16309 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
16310 return;
16311
16312 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
16313 }
16314
16315 static void
16316 do_neon_tst (void)
16317 {
16318 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
16319 struct neon_type_el et = neon_check_type (3, rs,
16320 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
16321 neon_three_same (neon_quad (rs), 0, et.size);
16322 }
16323
16324 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
16325 same types as the MAC equivalents. The polynomial type for this instruction
16326 is encoded the same as the integer type. */
16327
16328 static void
16329 do_neon_mul (void)
16330 {
16331 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
16332 return;
16333
16334 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
16335 return;
16336
16337 if (inst.operands[2].isscalar)
16338 do_neon_mac_maybe_scalar ();
16339 else
16340 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
16341 }
16342
16343 static void
16344 do_neon_qdmulh (void)
16345 {
16346 if (inst.operands[2].isscalar)
16347 {
16348 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
16349 struct neon_type_el et = neon_check_type (3, rs,
16350 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
16351 NEON_ENCODE (SCALAR, inst);
16352 neon_mul_mac (et, neon_quad (rs));
16353 }
16354 else
16355 {
16356 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
16357 struct neon_type_el et = neon_check_type (3, rs,
16358 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
16359 NEON_ENCODE (INTEGER, inst);
16360 /* The U bit (rounding) comes from bit mask. */
16361 neon_three_same (neon_quad (rs), 0, et.size);
16362 }
16363 }
16364
/* Encode the MVE VMULLT/VMULLB widening multiplies, disambiguating from
   the Neon VMUL forms that can share the "vmullt" spelling.  */
static void
do_mve_vmull (void)
{

  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_DDS,
					  NS_QQS, NS_QQQ, NS_QQR, NS_NULL);
  /* Without MVE, an unpredicated "vmullt" may really be Neon VMUL with a
     "t" condition suffix: check whether the shape/types fit Neon.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
      && inst.cond == COND_ALWAYS
      && ((unsigned)inst.instruction) == M_MNEM_vmullt)
    {
      if (rs == NS_QQQ)
	{

	  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
						    N_SUF_32 | N_F64 | N_P8
						    | N_P16 | N_I_MVE | N_KEY);
	  if (((et.type == NT_poly) && et.size == 8
	       && ARM_CPU_IS_ANY (cpu_variant))
	      || (et.type == NT_integer) || (et.type == NT_float))
	    goto neon_vmul;
	}
      else
	goto neon_vmul;
    }

  constraint (rs != NS_QQQ, BAD_FPU);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
					    N_SU_32 | N_P8 | N_P16 | N_KEY);

  /* We are dealing with MVE's vmullt.  */
  if (et.size == 32
      && (inst.operands[0].reg == inst.operands[1].reg
	  || inst.operands[0].reg == inst.operands[2].reg))
    as_tsktsk (BAD_MVE_SRCDEST);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* Polynomial types use a distinct size encoding.  */
  if (et.type == NT_poly)
    mve_encode_qqq (neon_logbits (et.size), 64);
  else
    mve_encode_qqq (et.type == NT_unsigned, et.size);

  return;

 neon_vmul:
  /* Not MVE after all: re-dispatch as Neon VMUL, re-interpreting the
     trailing "t" as an IT condition code.  */
  inst.instruction = N_MNEM_vmul;
  inst.cond = 0xb;
  if (thumb_mode)
    inst.pred_insn_type = INSIDE_IT_INSN;
  do_neon_mul ();
}
16419
/* Encode MVE VABAV (absolute difference accumulated across a vector
   into a general-purpose register).  */
static void
do_mve_vabav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);

  if (rs == NS_NULL)
    return;

  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  /* Re-check only the vector operands' types (NS_NULL skips shape
     checking here).  */
  struct neon_type_el et = neon_check_type (2, NS_NULL, N_EQK, N_KEY | N_S8
					    | N_S16 | N_S32 | N_U8 | N_U16
					    | N_U32);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rqq (et.type == NT_unsigned, et.size);
}
16442
/* Encode the MVE VMLADAV/VMLSDAV family (multiply and accumulate across
   a vector into a general-purpose register).  */
static void
do_mve_vmladav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
					    N_EQK, N_EQK, N_SU_MVE | N_KEY);

  /* The exchanging (..X) and subtracting (VMLSDAV..) variants only
     accept signed types.  */
  if (et.type == NT_unsigned
      && (inst.instruction == M_MNEM_vmladavx
	  || inst.instruction == M_MNEM_vmladavax
	  || inst.instruction == M_MNEM_vmlsdav
	  || inst.instruction == M_MNEM_vmlsdava
	  || inst.instruction == M_MNEM_vmlsdavx
	  || inst.instruction == M_MNEM_vmlsdavax))
    first_error (BAD_SIMD_TYPE);

  constraint (inst.operands[2].reg > 14,
	      _("MVE vector register in the range [Q0..Q7] expected"));

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The 8-bit-size flag lives in a different bit position for the
     VMLSDAV variants than for VMLADAV.  */
  if (inst.instruction == M_MNEM_vmlsdav
      || inst.instruction == M_MNEM_vmlsdava
      || inst.instruction == M_MNEM_vmlsdavx
      || inst.instruction == M_MNEM_vmlsdavax)
    inst.instruction |= (et.size == 8) << 28;
  else
    inst.instruction |= (et.size == 8) << 8;

  mve_encode_rqq (et.type == NT_unsigned, 64);
  inst.instruction |= (et.size == 32) << 16;
}
16478
16479 static void
16480 do_neon_qrdmlah (void)
16481 {
16482 /* Check we're on the correct architecture. */
16483 if (!mark_feature_used (&fpu_neon_ext_armv8))
16484 inst.error =
16485 _("instruction form not available on this architecture.");
16486 else if (!mark_feature_used (&fpu_neon_ext_v8_1))
16487 {
16488 as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
16489 record_feature_use (&fpu_neon_ext_v8_1);
16490 }
16491
16492 if (inst.operands[2].isscalar)
16493 {
16494 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
16495 struct neon_type_el et = neon_check_type (3, rs,
16496 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
16497 NEON_ENCODE (SCALAR, inst);
16498 neon_mul_mac (et, neon_quad (rs));
16499 }
16500 else
16501 {
16502 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
16503 struct neon_type_el et = neon_check_type (3, rs,
16504 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
16505 NEON_ENCODE (INTEGER, inst);
16506 /* The U bit (rounding) comes from bit mask. */
16507 neon_three_same (neon_quad (rs), 0, et.size);
16508 }
16509 }
16510
16511 static void
16512 do_neon_fcmp_absolute (void)
16513 {
16514 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
16515 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
16516 N_F_16_32 | N_KEY);
16517 /* Size field comes from bit mask. */
16518 neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
16519 }
16520
static void
do_neon_fcmp_absolute_inv (void)
{
  /* Inverted absolute compare: swap the sources, then encode the
     non-inverted form.  */
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
16527
16528 static void
16529 do_neon_step (void)
16530 {
16531 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
16532 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
16533 N_F_16_32 | N_KEY);
16534 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
16535 }
16536
/* Encode VABS/VNEG, preferring the VFP scalar forms when they apply.  */
static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  /* Shared between Neon and MVE: verify predication is legal.  */
  if (check_simd_pred_availability (et.type == NT_float,
				    NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  /* Bit 10 distinguishes the floating-point variant.  */
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
16563
/* Encode VSLI (shift left and insert).  Left-shift amount must satisfy
   0 <= imm < element size; the immediate is encoded directly.  */
16564 static void
16565 do_neon_sli (void)
16566 {
16567 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16568 struct neon_type_el et = neon_check_type (2, rs,
16569 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
16570 int imm = inst.operands[2].imm;
16571 constraint (imm < 0 || (unsigned)imm >= et.size,
16572 _("immediate out of range for insert"));
16573 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
16574 }
16575
/* Encode VSRI (shift right and insert).  Right-shift amount must satisfy
   1 <= imm <= element size and is encoded as (size - imm), matching the
   hardware's right-shift immediate convention.  */
16576 static void
16577 do_neon_sri (void)
16578 {
16579 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16580 struct neon_type_el et = neon_check_type (2, rs,
16581 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
16582 int imm = inst.operands[2].imm;
16583 constraint (imm < 1 || (unsigned)imm > et.size,
16584 _("immediate out of range for insert"));
16585 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
16586 }
16587
/* Encode VQSHLU (saturating shift left, unsigned result from signed
   input) with an immediate shift of 0 <= imm < element size.  */
16588 static void
16589 do_neon_qshlu_imm (void)
16590 {
16591 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16592 struct neon_type_el et = neon_check_type (2, rs,
16593 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
16594 int imm = inst.operands[2].imm;
16595 constraint (imm < 0 || (unsigned)imm >= et.size,
16596 _("immediate out of range for shift"));
16597 /* Only encodes the 'U present' variant of the instruction.
16598 In this case, signed types have OP (bit 8) set to 0.
16599 Unsigned types have OP set to 1. */
16600 inst.instruction |= (et.type == NT_unsigned) << 8;
16601 /* The rest of the bits are the same as other immediate shifts. */
16602 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
16603 }
16604
/* Encode VQMOVN: saturating narrow move, Q source to D destination,
   S16/S32/S64 or U16/U32/U64 source elements.  */
16605 static void
16606 do_neon_qmovn (void)
16607 {
16608 struct neon_type_el et = neon_check_type (2, NS_DQ,
16609 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
16610 /* Saturating move where operands can be signed or unsigned, and the
16611 destination has the same signedness. */
16612 NEON_ENCODE (INTEGER, inst);
/* op field: 0xc0 selects the unsigned variant, 0x80 the signed one.  */
16613 if (et.type == NT_unsigned)
16614 inst.instruction |= 0xc0;
16615 else
16616 inst.instruction |= 0x80;
/* Size encoded from the narrowed (destination) element width.  */
16617 neon_two_same (0, 1, et.size / 2);
16618 }
16619
/* Encode VQMOVUN: saturating narrow move producing unsigned results from
   signed S16/S32/S64 source elements.  */
16620 static void
16621 do_neon_qmovun (void)
16622 {
16623 struct neon_type_el et = neon_check_type (2, NS_DQ,
16624 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
16625 /* Saturating move with unsigned results. Operands must be signed. */
16626 NEON_ENCODE (INTEGER, inst);
16627 neon_two_same (0, 1, et.size / 2);
16628 }
16629
/* Encode VQSHRN/VQRSHRN (saturating shift right and narrow).  A zero
   immediate is rewritten as VQMOVN, since #0 has no dedicated encoding.  */
16630 static void
16631 do_neon_rshift_sat_narrow (void)
16632 {
16633 /* FIXME: Types for narrowing. If operands are signed, results can be signed
16634 or unsigned. If operands are unsigned, results must also be unsigned. */
16635 struct neon_type_el et = neon_check_type (2, NS_DQI,
16636 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
16637 int imm = inst.operands[2].imm;
16638 /* This gets the bounds check, size encoding and immediate bits calculation
16639 right. */
16640 et.size /= 2;
16641
16642 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
16643 VQMOVN.I<size> <Dd>, <Qm>. */
16644 if (imm == 0)
16645 {
16646 inst.operands[2].present = 0;
16647 inst.instruction = N_MNEM_vqmovn;
16648 do_neon_qmovn ();
16649 return;
16650 }
16651
/* Shift must be 1..narrowed-size; hardware encodes it as size - imm.  */
16652 constraint (imm < 1 || (unsigned)imm > et.size,
16653 _("immediate out of range"));
16654 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
16655 }
16656
/* Encode VQSHRUN/VQRSHRUN (saturating shift right, narrow, unsigned
   result from signed input).  A zero immediate becomes VQMOVUN.  */
16657 static void
16658 do_neon_rshift_sat_narrow_u (void)
16659 {
16660 /* FIXME: Types for narrowing. If operands are signed, results can be signed
16661 or unsigned. If operands are unsigned, results must also be unsigned. */
16662 struct neon_type_el et = neon_check_type (2, NS_DQI,
16663 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
16664 int imm = inst.operands[2].imm;
16665 /* This gets the bounds check, size encoding and immediate bits calculation
16666 right. */
16667 et.size /= 2;
16668
16669 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
16670 VQMOVUN.I<size> <Dd>, <Qm>. */
16671 if (imm == 0)
16672 {
16673 inst.operands[2].present = 0;
16674 inst.instruction = N_MNEM_vqmovun;
16675 do_neon_qmovun ();
16676 return;
16677 }
16678
16679 constraint (imm < 1 || (unsigned)imm > et.size,
16680 _("immediate out of range"));
16681 /* FIXME: The manual is kind of unclear about what value U should have in
16682 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
16683 must be 1. */
16684 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
16685 }
16686
/* Encode VMOVN: narrowing move, Q source to D destination, integer
   I16/I32/I64 source element types.  */
16687 static void
16688 do_neon_movn (void)
16689 {
16690 struct neon_type_el et = neon_check_type (2, NS_DQ,
16691 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
16692 NEON_ENCODE (INTEGER, inst);
16693 neon_two_same (0, 1, et.size / 2);
16694 }
16695
/* Encode VSHRN/VRSHRN (shift right and narrow).  A zero immediate is
   rewritten as VMOVN.  */
16696 static void
16697 do_neon_rshift_narrow (void)
16698 {
16699 struct neon_type_el et = neon_check_type (2, NS_DQI,
16700 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
16701 int imm = inst.operands[2].imm;
16702 /* This gets the bounds check, size encoding and immediate bits calculation
16703 right. */
16704 et.size /= 2;
16705
16706 /* If immediate is zero then we are a pseudo-instruction for
16707 VMOVN.I<size> <Dd>, <Qm> */
16708 if (imm == 0)
16709 {
16710 inst.operands[2].present = 0;
16711 inst.instruction = N_MNEM_vmovn;
16712 do_neon_movn ();
16713 return;
16714 }
16715
16716 constraint (imm < 1 || (unsigned)imm > et.size,
16717 _("immediate out of range for narrowing operation"));
16718 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
16719 }
16720
/* Encode VSHLL (shift left long, D source to Q destination).  The
   maximum-shift form (imm == element size) has its own opcode; other
   shift amounts use the general immediate-shift encoding and require
   S/U types rather than plain I types.  */
16721 static void
16722 do_neon_shll (void)
16723 {
16724 /* FIXME: Type checking when lengthening. */
16725 struct neon_type_el et = neon_check_type (2, NS_QDI,
16726 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
16727 unsigned imm = inst.operands[2].imm;
16728
16729 if (imm == et.size)
16730 {
16731 /* Maximum shift variant. */
16732 NEON_ENCODE (INTEGER, inst);
16733 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16734 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16735 inst.instruction |= LOW4 (inst.operands[1].reg);
16736 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16737 inst.instruction |= neon_logbits (et.size) << 18;
16738
16739 neon_dp_fixup (&inst);
16740 }
16741 else
16742 {
16743 /* A more-specific type check for non-max versions. */
16744 et = neon_check_type (2, NS_QDI,
16745 N_EQK | N_DBL, N_SU_32 | N_KEY);
16746 NEON_ENCODE (IMMED, inst);
16747 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
16748 }
16749 }
16750
16751 /* Check the various types for the VCVT instruction, and return which version
16752 the current instruction is. */
16753
/* X-macro table of every VCVT conversion "flavour".  Each CVT_VAR row is
   (enum-suffix, dest type bits, src type bits, reg/whole_reg flags,
   bitshift-form VFP opname, plain VFP opname, round-to-zero VFP opname).
   The table is expanded several times below: once to build the
   neon_cvt_flavour enum, once per opname column in the do_vfp_nsyn_cvt*
   functions, and once in get_neon_cvt_flavour for type matching.  The
   `whole_reg' and `key' identifiers are free here and bound at each
   expansion site.  */
16754 #define CVT_FLAVOUR_VAR \
16755 CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
16756 CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
16757 CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
16758 CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
16759 /* Half-precision conversions. */ \
16760 CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
16761 CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
16762 CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
16763 CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
16764 CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
16765 CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
16766 /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
16767 Compared with single/double precision variants, only the co-processor \
16768 field is different, so the encoding flow is reused here. */ \
16769 CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
16770 CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
16771 CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
16772 CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
16773 /* VFP instructions. */ \
16774 CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
16775 CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
16776 CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
16777 CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
16778 CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
16779 CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
16780 /* VFP instructions with bitshift. */ \
16781 CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
16782 CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
16783 CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
16784 CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
16785 CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
16786 CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
16787 CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
16788 CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
16789
/* First expansion: one enumerator per table row.  Enum order therefore
   matches table order; neon_cvt_flavour_first_fp marks the start of the
   pure-VFP rows (f32_f64 onward).  */
16790 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
16791 neon_cvt_flavour_##C,
16792
16793 /* The different types of conversions we can do. */
16794 enum neon_cvt_flavour
16795 {
16796 CVT_FLAVOUR_VAR
16797 neon_cvt_flavour_invalid,
16798 neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
16799 };
16800
16801 #undef CVT_VAR
16802
/* Determine which VCVT conversion flavour the current instruction's
   operand types select, by trying neon_check_type against each table row
   in order and returning the first match.  Returns
   neon_cvt_flavour_invalid (leaving inst.error set by the last failed
   check) if nothing matches.  */
16803 static enum neon_cvt_flavour
16804 get_neon_cvt_flavour (enum neon_shape rs)
16805 {
/* Each expansion probes one row; on a match, clear the error left by
   earlier failed probes and return that row's enumerator.  */
16806 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
16807 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
16808 if (et.type != NT_invtype) \
16809 { \
16810 inst.error = NULL; \
16811 return (neon_cvt_flavour_##C); \
16812 }
16813
16814 struct neon_type_el et;
16815 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
16816 || rs == NS_FF) ? N_VFP : 0;
16817 /* The instruction versions which take an immediate take one register
16818 argument, which is extended to the width of the full register. Thus the
16819 "source" and "destination" registers must have the same width. Hack that
16820 here by making the size equal to the key (wider, in this case) operand. */
16821 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
16822
16823 CVT_FLAVOUR_VAR;
16824
16825 return neon_cvt_flavour_invalid;
16826 #undef CVT_VAR
16827 }
16828
/* Rounding-mode variants of VCVT and friends, matching the mnemonic
   suffixes VCVTA/VCVTN/VCVTP/VCVTM (directed rounding), plus z for the
   round-toward-zero form, and x/r for the forms that use the current
   rounding mode (used by do_neon_cvtr below -- exact VCVTR/VCVTX mapping
   not visible here, confirm against the opcode table).  */
16829 enum neon_cvt_mode
16830 {
16831 neon_cvt_mode_a,
16832 neon_cvt_mode_n,
16833 neon_cvt_mode_p,
16834 neon_cvt_mode_m,
16835 neon_cvt_mode_z,
16836 neon_cvt_mode_x,
16837 neon_cvt_mode_r
16838 };
16839
16840 /* Neon-syntax VFP conversions. */
16841
/* Emit a VFP conversion written in Neon syntax by dispatching to the
   legacy VFP opcode name for FLAVOUR (bitshift column for the
   fixed-point shapes, plain column otherwise), then applying the
   ARMv8.2 fp16 encoding tweak where needed.  Flavours whose table entry
   is NULL emit nothing here.  */
16842 static void
16843 do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
16844 {
16845 const char *opname = 0;
16846
16847 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
16848 || rs == NS_FHI || rs == NS_HFI)
16849 {
16850 /* Conversions with immediate bitshift. */
16851 const char *enc[] =
16852 {
16853 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
16854 CVT_FLAVOUR_VAR
16855 NULL
16856 #undef CVT_VAR
16857 };
16858
16859 if (flavour < (int) ARRAY_SIZE (enc))
16860 {
16861 opname = enc[flavour];
/* The fixed-point form writes its result back into the first operand,
   so operands 0 and 1 must name the same register; shift operand 2
   down into slot 1 for the VFP encoder.  */
16862 constraint (inst.operands[0].reg != inst.operands[1].reg,
16863 _("operands 0 and 1 must be the same register"));
16864 inst.operands[1] = inst.operands[2];
16865 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
16866 }
16867 }
16868 else
16869 {
16870 /* Conversions without bitshift. */
16871 const char *enc[] =
16872 {
16873 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
16874 CVT_FLAVOUR_VAR
16875 NULL
16876 #undef CVT_VAR
16877 };
16878
16879 if (flavour < (int) ARRAY_SIZE (enc))
16880 opname = enc[flavour];
16881 }
16882
16883 if (opname)
16884 do_vfp_nsyn_opcode (opname);
16885
16886 /* ARMv8.2 fp16 VCVT instruction. */
16887 if (flavour == neon_cvt_flavour_s32_f16
16888 || flavour == neon_cvt_flavour_u32_f16
16889 || flavour == neon_cvt_flavour_f16_u32
16890 || flavour == neon_cvt_flavour_f16_s32
16891 do_scalar_fp16_v82_encode ();
16892 }
16893
/* Emit the round-toward-zero VFP conversion (VCVT with the Z suffix
   semantics) by looking up the ZN column of the flavour table.  Flavours
   with a NULL entry in that column are silently skipped.  */
16894 static void
16895 do_vfp_nsyn_cvtz (void)
16896 {
16897 enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
16898 enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
16899 const char *enc[] =
16900 {
16901 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
16902 CVT_FLAVOUR_VAR
16903 NULL
16904 #undef CVT_VAR
16905 };
16906
16907 if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
16908 do_vfp_nsyn_opcode (enc[flavour]);
16909 }
16910
/* Encode the FP v8 directed-rounding float-to-integer conversions
   (VCVTA/VCVTN/VCVTP/VCVTM in their scalar VFP form).  sz selects single
   vs double source, op selects signed vs unsigned result, rm is the
   rounding-mode field.  Only float-to-s32/u32 flavours are valid here.  */
16911 static void
16912 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
16913 enum neon_cvt_mode mode)
16914 {
16915 int sz, op;
16916 int rm;
16917
16918 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16919 D register operands. */
16920 if (flavour == neon_cvt_flavour_s32_f64
16921 || flavour == neon_cvt_flavour_u32_f64)
16922 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
16923 _(BAD_FPU));
16924
16925 if (flavour == neon_cvt_flavour_s32_f16
16926 || flavour == neon_cvt_flavour_u32_f16)
16927 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
16928 _(BAD_FP16));
16929
16930 set_pred_insn_type (OUTSIDE_PRED_INSN);
16931
16932 switch (flavour)
16933 {
16934 case neon_cvt_flavour_s32_f64:
16935 sz = 1;
16936 op = 1;
16937 break;
16938 case neon_cvt_flavour_s32_f32:
16939 sz = 0;
16940 op = 1;
16941 break;
16942 case neon_cvt_flavour_s32_f16:
16943 sz = 0;
16944 op = 1;
16945 break;
16946 case neon_cvt_flavour_u32_f64:
16947 sz = 1;
16948 op = 0;
16949 break;
16950 case neon_cvt_flavour_u32_f32:
16951 sz = 0;
16952 op = 0;
16953 break;
16954 case neon_cvt_flavour_u32_f16:
16955 sz = 0;
16956 op = 0;
16957 break;
16958 default:
16959 first_error (_("invalid instruction shape"));
16960 return;
16961 }
16962
16963 switch (mode)
16964 {
16965 case neon_cvt_mode_a: rm = 0; break;
16966 case neon_cvt_mode_n: rm = 1; break;
16967 case neon_cvt_mode_p: rm = 2; break;
16968 case neon_cvt_mode_m: rm = 3; break;
16969 default: first_error (_("invalid rounding mode")); return;
16970 }
16971
16972 NEON_ENCODE (FPV8, inst);
16973 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
16974 encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
16975 inst.instruction |= sz << 8;
16976
16977 /* ARMv8.2 fp16 VCVT instruction. */
16978 if (flavour == neon_cvt_flavour_s32_f16
16979 ||flavour == neon_cvt_flavour_u32_f16)
16980 do_scalar_fp16_v82_encode ();
16981 inst.instruction |= op << 7;
16982 inst.instruction |= rm << 16;
/* These scalar FP v8 instructions are unconditional; force the 0xF
   condition field.  */
16983 inst.instruction |= 0xf0000000;
16984 inst.is_neon = TRUE;
16985 }
16986
/* Central VCVT dispatcher.  Determines the operand shape and conversion
   flavour, then routes to: the legacy round-to-zero VFP form (PR11109),
   the ARMv8.2 fp16 scalar form, the generic VFP encoders, or one of the
   Neon/MVE encodings below (fixed-point immediate, directed-rounding
   vector, integer vector, or f16<->f32 narrow/widen).  May recurse once
   to reinterpret MVE "vcvtne" as VCVT with an NE condition.  */
16987 static void
16988 do_neon_cvt_1 (enum neon_cvt_mode mode)
16989 {
16990 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
16991 NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
16992 NS_FH, NS_HF, NS_FHI, NS_HFI,
16993 NS_NULL);
16994 enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
16995
16996 if (flavour == neon_cvt_flavour_invalid)
16997 return;
16998
16999 /* PR11109: Handle round-to-zero for VCVT conversions. */
17000 if (mode == neon_cvt_mode_z
17001 && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
17002 && (flavour == neon_cvt_flavour_s16_f16
17003 || flavour == neon_cvt_flavour_u16_f16
17004 || flavour == neon_cvt_flavour_s32_f32
17005 || flavour == neon_cvt_flavour_u32_f32
17006 || flavour == neon_cvt_flavour_s32_f64
17007 || flavour == neon_cvt_flavour_u32_f64)
17008 && (rs == NS_FD || rs == NS_FF))
17009 {
17010 do_vfp_nsyn_cvtz ();
17011 return;
17012 }
17013
17014 /* ARMv8.2 fp16 VCVT conversions. */
17015 if (mode == neon_cvt_mode_z
17016 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
17017 && (flavour == neon_cvt_flavour_s32_f16
17018 || flavour == neon_cvt_flavour_u32_f16)
17019 && (rs == NS_FH))
17020 {
17021 do_vfp_nsyn_cvtz ();
17022 do_scalar_fp16_v82_encode ();
17023 return;
17024 }
17025
17026 /* VFP rather than Neon conversions. */
17027 if (flavour >= neon_cvt_flavour_first_fp)
17028 {
17029 if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
17030 do_vfp_nsyn_cvt (rs, flavour);
17031 else
17032 do_vfp_nsyn_cvt_fpv8 (flavour, mode);
17033
17034 return;
17035 }
17036
17037 switch (rs)
17038 {
17039 case NS_QQI:
17040 if (mode == neon_cvt_mode_z
17041 && (flavour == neon_cvt_flavour_f16_s16
17042 || flavour == neon_cvt_flavour_f16_u16
17043 || flavour == neon_cvt_flavour_s16_f16
17044 || flavour == neon_cvt_flavour_u16_f16
17045 || flavour == neon_cvt_flavour_f32_u32
17046 || flavour == neon_cvt_flavour_f32_s32
17047 || flavour == neon_cvt_flavour_s32_f32
17048 || flavour == neon_cvt_flavour_u32_f32))
17049 {
17050 if (check_simd_pred_availability (1, NEON_CHECK_CC | NEON_CHECK_ARCH))
17051 return;
17052 }
17053 else if (mode == neon_cvt_mode_n)
17054 {
17055 /* We are dealing with vcvt with the 'ne' condition. */
17056 inst.cond = 0x1;
17057 inst.instruction = N_MNEM_vcvt;
17058 do_neon_cvt_1 (neon_cvt_mode_z);
17059 return;
17060 }
17061 /* fall through. */
17062 case NS_DDI:
17063 {
17064 unsigned immbits;
/* Per-flavour opcode bits for the fixed-point immediate form; indexed
   by the first eight neon_cvt_flavour values (f32<->s32/u32 and the
   f16<->s16/u16 group).  */
17065 unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
17066 0x0000100, 0x1000100, 0x0, 0x1000000};
17067
17068 if ((rs != NS_QQI || !ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
17069 && vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
17070 return;
17071
17072 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
17073 {
/* MVE fixed-point shifts: #0 is rejected and the immediate may not
   exceed the element width (16 or 32).  */
17074 constraint (inst.operands[2].present && inst.operands[2].imm == 0,
17075 _("immediate value out of range"));
17076 switch (flavour)
17077 {
17078 case neon_cvt_flavour_f16_s16:
17079 case neon_cvt_flavour_f16_u16:
17080 case neon_cvt_flavour_s16_f16:
17081 case neon_cvt_flavour_u16_f16:
17082 constraint (inst.operands[2].imm > 16,
17083 _("immediate value out of range"));
17084 break;
17085 case neon_cvt_flavour_f32_u32:
17086 case neon_cvt_flavour_f32_s32:
17087 case neon_cvt_flavour_s32_f32:
17088 case neon_cvt_flavour_u32_f32:
17089 constraint (inst.operands[2].imm > 32,
17090 _("immediate value out of range"));
17091 break;
17092 default:
17093 inst.error = BAD_FPU;
17094 return;
17095 }
17096 }
17097
17098 /* Fixed-point conversion with #0 immediate is encoded as an
17099 integer conversion. */
17100 if (inst.operands[2].present && inst.operands[2].imm == 0)
17101 goto int_encode;
17102 NEON_ENCODE (IMMED, inst);
17103 if (flavour != neon_cvt_flavour_invalid)
17104 inst.instruction |= enctab[flavour];
17105 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17106 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17107 inst.instruction |= LOW4 (inst.operands[1].reg);
17108 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
17109 inst.instruction |= neon_quad (rs) << 6;
17110 inst.instruction |= 1 << 21;
17111 if (flavour < neon_cvt_flavour_s16_f16)
17112 {
/* NOTE(review): bit 21 was already set unconditionally above, so this
   OR is redundant (harmless).  Immediate is encoded as 32 - imm per
   the fixed-point immediate convention.  */
17113 inst.instruction |= 1 << 21;
17114 immbits = 32 - inst.operands[2].imm;
17115 inst.instruction |= immbits << 16;
17116 }
17117 else
17118 {
/* Half-precision group: different size field (bits 20-21) and the
   immediate counts down from 16; bit 9 is cleared.  */
17119 inst.instruction |= 3 << 20;
17120 immbits = 16 - inst.operands[2].imm;
17121 inst.instruction |= immbits << 16;
17122 inst.instruction &= ~(1 << 9);
17123 }
17124
17125 neon_dp_fixup (&inst);
17126 }
17127 break;
17128
17129 case NS_QQ:
17130 if ((mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
17131 || mode == neon_cvt_mode_m || mode == neon_cvt_mode_p)
17132 && (flavour == neon_cvt_flavour_s16_f16
17133 || flavour == neon_cvt_flavour_u16_f16
17134 || flavour == neon_cvt_flavour_s32_f32
17135 || flavour == neon_cvt_flavour_u32_f32))
17136 {
17137 if (check_simd_pred_availability (1,
17138 NEON_CHECK_CC | NEON_CHECK_ARCH8))
17139 return;
17140 }
17141 else if (mode == neon_cvt_mode_z
17142 && (flavour == neon_cvt_flavour_f16_s16
17143 || flavour == neon_cvt_flavour_f16_u16
17144 || flavour == neon_cvt_flavour_s16_f16
17145 || flavour == neon_cvt_flavour_u16_f16
17146 || flavour == neon_cvt_flavour_f32_u32
17147 || flavour == neon_cvt_flavour_f32_s32
17148 || flavour == neon_cvt_flavour_s32_f32
17149 || flavour == neon_cvt_flavour_u32_f32))
17150 {
17151 if (check_simd_pred_availability (1,
17152 NEON_CHECK_CC | NEON_CHECK_ARCH))
17153 return;
17154 }
17155 /* fall through. */
17156 case NS_DD:
17157 if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
17158 {
/* Directed-rounding vector form (VCVTA/N/P/M): mode goes in bits 8-9,
   the unsigned bit in bit 7.  */
17159
17160 NEON_ENCODE (FLOAT, inst);
17161 if (check_simd_pred_availability (1,
17162 NEON_CHECK_CC | NEON_CHECK_ARCH8))
17163 return;
17164
17165 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17166 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17167 inst.instruction |= LOW4 (inst.operands[1].reg);
17168 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
17169 inst.instruction |= neon_quad (rs) << 6;
17170 inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
17171 || flavour == neon_cvt_flavour_u32_f32) << 7;
17172 inst.instruction |= mode << 8;
17173 if (flavour == neon_cvt_flavour_u16_f16
17174 || flavour == neon_cvt_flavour_s16_f16)
17175 /* Mask off the original size bits and reencode them. */
17176 inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));
17177
17178 if (thumb_mode)
17179 inst.instruction |= 0xfc000000;
17180 else
17181 inst.instruction |= 0xf0000000;
17182 }
17183 else
17184 {
17185 int_encode:
17186 {
/* Integer-conversion opcode bits per flavour (same index scheme as the
   fixed-point enctab above).  */
17187 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
17188 0x100, 0x180, 0x0, 0x080};
17189
17190 NEON_ENCODE (INTEGER, inst);
17191
17192 if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
17193 {
17194 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
17195 return;
17196 }
17197
17198 if (flavour != neon_cvt_flavour_invalid)
17199 inst.instruction |= enctab[flavour];
17200
17201 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17202 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17203 inst.instruction |= LOW4 (inst.operands[1].reg);
17204 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
17205 inst.instruction |= neon_quad (rs) << 6;
17206 if (flavour >= neon_cvt_flavour_s16_f16
17207 && flavour <= neon_cvt_flavour_f16_u16)
17208 /* Half precision. */
17209 inst.instruction |= 1 << 18;
17210 else
17211 inst.instruction |= 2 << 18;
17212
17213 neon_dp_fixup (&inst);
17214 }
17215 }
17216 break;
17217
17218 /* Half-precision conversions for Advanced SIMD -- neon. */
17219 case NS_QD:
17220 case NS_DQ:
17221 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
17222 return;
17223
17224 if ((rs == NS_DQ)
17225 && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
17226 {
17227 as_bad (_("operand size must match register width"));
17228 break;
17229 }
17230
17231 if ((rs == NS_QD)
17232 && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
17233 {
17234 as_bad (_("operand size must match register width"));
17235 break;
17236 }
17237
/* Base opcodes for the narrow (f32->f16, DQ) and widen (f16->f32, QD)
   forms respectively.  */
17238 if (rs == NS_DQ)
17239 inst.instruction = 0x3b60600;
17240 else
17241 inst.instruction = 0x3b60700;
17242
17243 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17244 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17245 inst.instruction |= LOW4 (inst.operands[1].reg);
17246 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
17247 neon_dp_fixup (&inst);
17248 break;
17249
17250 default:
17251 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
17252 if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
17253 do_vfp_nsyn_cvt (rs, flavour);
17254 else
17255 do_vfp_nsyn_cvt_fpv8 (flavour, mode);
17256 }
17257 }
17258
/* VCVTR: convert using the current rounding mode (mode_x).  */
17259 static void
17260 do_neon_cvtr (void)
17261 {
17262 do_neon_cvt_1 (neon_cvt_mode_x);
17263 }
17264
/* Plain VCVT: float-to-integer directions round toward zero (mode_z).  */
17265 static void
17266 do_neon_cvt (void)
17267 {
17268 do_neon_cvt_1 (neon_cvt_mode_z);
17269 }
17270
/* VCVTA: round to nearest, ties away from zero.  */
17271 static void
17272 do_neon_cvta (void)
17273 {
17274 do_neon_cvt_1 (neon_cvt_mode_a);
17275 }
17276
/* VCVTN: round to nearest, ties to even (also reinterpreted as VCVT with
   an NE condition for MVE inside do_neon_cvt_1).  */
17277 static void
17278 do_neon_cvtn (void)
17279 {
17280 do_neon_cvt_1 (neon_cvt_mode_n);
17281 }
17282
/* VCVTP: round toward plus infinity.  */
17283 static void
17284 do_neon_cvtp (void)
17285 {
17286 do_neon_cvt_1 (neon_cvt_mode_p);
17287 }
17288
/* VCVTM: round toward minus infinity.  */
17289 static void
17290 do_neon_cvtm (void)
17291 {
17292 do_neon_cvt_1 (neon_cvt_mode_m);
17293 }
17294
/* Shared encoder for the scalar VCVTB/VCVTT half-precision conversions.
   T selects the top (1) or bottom (0) half of the half-precision
   register, TO is true when converting *to* half precision, IS_DOUBLE
   selects the double-precision counterpart (requires FP ARMv8).  */
17295 static void
17296 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
17297 {
17298 if (is_double)
17299 mark_feature_used (&fpu_vfp_ext_armv8);
17300
/* The wide register is on the destination side for from-half, on the
   source side for to-half.  */
17301 encode_arm_vfp_reg (inst.operands[0].reg,
17302 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
17303 encode_arm_vfp_reg (inst.operands[1].reg,
17304 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
17305 inst.instruction |= to ? 0x10000 : 0;
17306 inst.instruction |= t ? 0x80 : 0;
17307 inst.instruction |= is_double ? 0x100 : 0;
17308 do_vfp_cond_or_thumb ();
17309 }
17310
/* Parse/encode VCVTB (t == FALSE) or VCVTT (t == TRUE).  Handles the MVE
   Q-register forms (including rewriting MVE vcvtb/vcvtt with full-width
   types into a predicated VCVT), the MVE f16<->f32 top/bottom form, and
   the four scalar VFP half-precision conversions via do_neon_cvttb_2.  */
17311 static void
17312 do_neon_cvttb_1 (bfd_boolean t)
17313 {
17314 enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
17315 NS_DF, NS_DH, NS_QQ, NS_QQI, NS_NULL);
17316
17317 if (rs == NS_NULL)
17318 return;
17319 else if (rs == NS_QQ || rs == NS_QQI)
17320 {
17321 int single_to_half = 0;
17322 if (check_simd_pred_availability (1, NEON_CHECK_ARCH))
17323 return;
17324
17325 enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
17326
17327 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
17328 && (flavour == neon_cvt_flavour_u16_f16
17329 || flavour == neon_cvt_flavour_s16_f16
17330 || flavour == neon_cvt_flavour_f16_s16
17331 || flavour == neon_cvt_flavour_f16_u16
17332 || flavour == neon_cvt_flavour_u32_f32
17333 || flavour == neon_cvt_flavour_s32_f32
17334 || flavour == neon_cvt_flavour_f32_s32
17335 || flavour == neon_cvt_flavour_f32_u32))
17336 {
/* Full-width types: this is really a VPT-predicated VCVT; re-dispatch.  */
17337 inst.cond = 0xf;
17338 inst.instruction = N_MNEM_vcvt;
17339 set_pred_insn_type (INSIDE_VPT_INSN);
17340 do_neon_cvt_1 (neon_cvt_mode_z);
17341 return;
17342 }
17343 else if (rs == NS_QQ && flavour == neon_cvt_flavour_f32_f16)
17344 single_to_half = 1;
17345 else if (rs == NS_QQ && flavour != neon_cvt_flavour_f16_f32)
17346 {
17347 first_error (BAD_FPU);
17348 return;
17349 }
17350
/* MVE VCVTB/VCVTT f16<->f32 encoding; bit 28 selects the narrowing
   direction, bit 12 the top/bottom half.  */
17351 inst.instruction = 0xee3f0e01;
17352 inst.instruction |= single_to_half << 28;
17353 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17354 inst.instruction |= LOW4 (inst.operands[0].reg) << 13;
17355 inst.instruction |= t << 12;
17356 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
17357 inst.instruction |= LOW4 (inst.operands[1].reg) << 1;
17358 inst.is_neon = 1;
17359 }
17360 else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
17361 {
17362 inst.error = NULL;
17363 do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
17364 }
17365 else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
17366 {
17367 inst.error = NULL;
17368 do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
17369 }
17370 else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
17371 {
17372 /* The VCVTB and VCVTT instructions with D-register operands
17373 don't work for SP only targets. */
17374 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
17375 _(BAD_FPU));
17376
17377 inst.error = NULL;
17378 do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
17379 }
17380 else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
17381 {
17382 /* The VCVTB and VCVTT instructions with D-register operands
17383 don't work for SP only targets. */
17384 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
17385 _(BAD_FPU));
17386
17387 inst.error = NULL;
17388 do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
17389 }
17390 else
17391 return;
17392 }
17393
/* VCVTB: convert using the bottom half of the half-precision register.  */
17394 static void
17395 do_neon_cvtb (void)
17396 {
17397 do_neon_cvttb_1 (FALSE);
17398 }
17399
17400
/* VCVTT: convert using the top half of the half-precision register.  */
17401 static void
17402 do_neon_cvtt (void)
17403 {
17404 do_neon_cvttb_1 (TRUE);
17405 }
17406
/* Encode the immediate form of VMOV/VMVN.  Finds a cmode/op encoding for
   the immediate; if none exists, tries the bitwise-inverted immediate
   with the opposite mnemonic (VMOV <-> VMVN) before giving up.  */
17407 static void
17408 neon_move_immediate (void)
17409 {
17410 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
17411 struct neon_type_el et = neon_check_type (2, rs,
17412 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
17413 unsigned immlo, immhi = 0, immbits;
17414 int op, cmode, float_p;
17415
17416 constraint (et.type == NT_invtype,
17417 _("operand size must be specified for immediate VMOV"));
17418
17419 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
17420 op = (inst.instruction & (1 << 5)) != 0;
17421
/* 64-bit immediates arrive split across imm (low) and reg (high).  */
17422 immlo = inst.operands[1].imm;
17423 if (inst.operands[1].regisimm)
17424 immhi = inst.operands[1].reg;
17425
17426 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
17427 _("immediate has bits set outside the operand size"));
17428
17429 float_p = inst.operands[1].immisfloat;
17430
17431 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
17432 et.size, et.type)) == FAIL)
17433 {
17434 /* Invert relevant bits only. */
17435 neon_invert_size (&immlo, &immhi, et.size);
17436 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
17437 with one or the other; those cases are caught by
17438 neon_cmode_for_move_imm. */
17439 op = !op;
17440 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
17441 &op, et.size, et.type)) == FAIL)
17442 {
17443 first_error (_("immediate out of range"));
17444 return;
17445 }
17446 }
17447
/* Write back the possibly-flipped op bit and the chosen cmode.  */
17448 inst.instruction &= ~(1 << 5);
17449 inst.instruction |= op << 5;
17450
17451 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17452 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17453 inst.instruction |= neon_quad (rs) << 6;
17454 inst.instruction |= cmode << 8;
17455
17456 neon_write_immbits (immbits);
17457 }
17458
/* Encode VMVN: register form as a two-register Neon op, otherwise the
   immediate form via neon_move_immediate.  */
17459 static void
17460 do_neon_mvn (void)
17461 {
17462 if (inst.operands[1].isreg)
17463 {
17464 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17465
17466 NEON_ENCODE (INTEGER, inst);
17467 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17468 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17469 inst.instruction |= LOW4 (inst.operands[1].reg);
17470 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
17471 inst.instruction |= neon_quad (rs) << 6;
17472 }
17473 else
17474 {
17475 NEON_ENCODE (IMMED, inst);
17476 neon_move_immediate ();
17477 }
17478
17479 neon_dp_fixup (&inst);
17480 }
17481
17482 /* Encode instructions of form:
17483
17484 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
17485 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
17486
/* Encode a mixed-length (long/wide/narrow) three-register Neon op per the
   bit layout documented above: Vd in 12-15/22, Vn in 16-19/7, Vm in
   0-3/5, unsigned flag in bit 24, log2(SIZE) in bits 20-21.  */
17487 static void
17488 neon_mixed_length (struct neon_type_el et, unsigned size)
17489 {
17490 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17491 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17492 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
17493 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
17494 inst.instruction |= LOW4 (inst.operands[2].reg);
17495 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
17496 inst.instruction |= (et.type == NT_unsigned) << 24;
17497 inst.instruction |= neon_logbits (size) << 20;
17498
17499 neon_dp_fixup (&inst);
17500 }
17501
/* Encode VADDL/VSUBL/VABDL.  The Q,D,D shape is the genuine Neon long
   form; for MVE, the mnemonics only exist as vadd/vsub/vabd with an
   le/lt condition suffix inside an IT block, so rewrite and re-dispatch
   in that case.  */
17502 static void
17503 do_neon_dyadic_long (void)
17504 {
17505 enum neon_shape rs = neon_select_shape (NS_QDD, NS_QQQ, NS_QQR, NS_NULL);
17506 if (rs == NS_QDD)
17507 {
17508 if (vfp_or_neon_is_neon (NEON_CHECK_ARCH | NEON_CHECK_CC) == FAIL)
17509 return;
17510
17511 NEON_ENCODE (INTEGER, inst);
17512 /* FIXME: Type checking for lengthening op. */
17513 struct neon_type_el et = neon_check_type (3, NS_QDD,
17514 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
17515 neon_mixed_length (et, et.size);
17516 }
17517 else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
17518 && (inst.cond == 0xf || inst.cond == 0x10))
17519 {
17520 /* If parsing for MVE, vaddl/vsubl/vabdl{e,t} can only be vadd/vsub/vabd
17521 in an IT block with le/lt conditions. */
17522
/* 0xf was parsed as the 'le' suffix, 0x10 as 'lt'; map to the real
   condition codes LE (0xb) and LT (0xd).  */
17523 if (inst.cond == 0xf)
17524 inst.cond = 0xb;
17525 else if (inst.cond == 0x10)
17526 inst.cond = 0xd;
17527
17528 inst.pred_insn_type = INSIDE_IT_INSN;
17529
17530 if (inst.instruction == N_MNEM_vaddl)
17531 {
17532 inst.instruction = N_MNEM_vadd;
17533 do_neon_addsub_if_i ();
17534 }
17535 else if (inst.instruction == N_MNEM_vsubl)
17536 {
17537 inst.instruction = N_MNEM_vsub;
17538 do_neon_addsub_if_i ();
17539 }
17540 else if (inst.instruction == N_MNEM_vabdl)
17541 {
17542 inst.instruction = N_MNEM_vabd;
17543 do_neon_dyadic_if_su ();
17544 }
17545 }
17546 else
17547 first_error (BAD_FPU);
17548 }
17549
17550 static void
17551 do_neon_abal (void)
17552 {
17553 struct neon_type_el et = neon_check_type (3, NS_QDD,
17554 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
17555 neon_mixed_length (et, et.size);
17556 }
17557
17558 static void
17559 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
17560 {
17561 if (inst.operands[2].isscalar)
17562 {
17563 struct neon_type_el et = neon_check_type (3, NS_QDS,
17564 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
17565 NEON_ENCODE (SCALAR, inst);
17566 neon_mul_mac (et, et.type == NT_unsigned);
17567 }
17568 else
17569 {
17570 struct neon_type_el et = neon_check_type (3, NS_QDD,
17571 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
17572 NEON_ENCODE (INTEGER, inst);
17573 neon_mixed_length (et, et.size);
17574 }
17575 }
17576
17577 static void
17578 do_neon_mac_maybe_scalar_long (void)
17579 {
17580 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
17581 }
17582
/* Like neon_scalar_for_mul, this function generates the Rm encoding from
   GAS's internal SCALAR representation.  QUAD_P is 1 for the Q-register
   form, 0 for the D-register form.  Reports an error and returns 0 when
   the scalar's register or element index is out of range for the form.  */

static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  if (quad_p)
    {
      /* Q form: register in bits [2:0], index split over bits 3 and 5.  */
      if (regno <= 7 && elno <= 3)
	return ((regno & 0x7)
		| ((elno & 0x1) << 3)
		| (((elno >> 1) & 0x1) << 5));
    }
  else if (regno <= 15 && elno <= 1)
    /* D form: register split over bits [2:0] and 5, index in bit 3.  */
    return (((regno & 0x1) << 5)
	    | ((regno >> 1) & 0x7)
	    | ((elno & 0x1) << 3));

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
17615
static void
do_neon_fmac_maybe_scalar_long (int subtype)
{
  /* VFMAL (SUBTYPE == 0) / VFMSL (SUBTYPE == 1): FP16 fused multiply-add
     long, with either a register or a scalar-indexed third operand.  */
  enum neon_shape rs;
  int high8;
  /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding.  The
     'size' field (bits[21:20]) has a different meaning.  For the scalar index
     variant it is used to differentiate add and subtract, otherwise it has
     the fixed value 0x2.  */
  int size = -1;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
	       "behaviour is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
	      _(BAD_FP16));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
     be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      high8 = 0xfe000000;
      if (subtype)
	size = 16;
      rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      size = 32;
      if (subtype)
	inst.instruction |= (0x1 << 23);
      rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
    }

  neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16);

  /* "opcode" from template has included "ubit", so simply pass 0 here.  Also,
     the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
     so we simply pass -1 as size.  */
  unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
  neon_three_same (quad_p, 0, size);

  /* Undo neon_dp_fixup.  Redo the high eight bits.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;

#define LOW1(R) ((R) & 0x1)
#define HI4(R) (((R) >> 1) & 0xf)
  /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
     whether the instruction is in Q form and whether Vm is a scalar indexed
     operand.  */
  if (inst.operands[2].isscalar)
    {
      /* Replace the Rm field written by neon_three_same with the special
	 scalar encoding.  */
      unsigned rm
	= neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
      inst.instruction &= 0xffffffd0;
      inst.instruction |= rm;

      if (!quad_p)
	{
	  /* Redo Rn as well.  */
	  inst.instruction &= 0xfff0ff7f;
	  inst.instruction |= HI4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
	}
    }
  else if (!quad_p)
    {
      /* Redo Rn and Rm.  */
      inst.instruction &= 0xfff0ff50;
      inst.instruction |= HI4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI4 (inst.operands[2].reg);
      inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
    }
}
17697
static void
do_neon_vfmal (void)
{
  /* VFMAL: FP16 fused multiply-accumulate long (add form).  */
  do_neon_fmac_maybe_scalar_long (0);
}
17703
static void
do_neon_vfmsl (void)
{
  /* VFMSL: FP16 fused multiply-accumulate long (subtract form).  */
  do_neon_fmac_maybe_scalar_long (1);
}
17709
17710 static void
17711 do_neon_dyadic_wide (void)
17712 {
17713 struct neon_type_el et = neon_check_type (3, NS_QQD,
17714 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
17715 neon_mixed_length (et, et.size);
17716 }
17717
17718 static void
17719 do_neon_dyadic_narrow (void)
17720 {
17721 struct neon_type_el et = neon_check_type (3, NS_QDD,
17722 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
17723 /* Operand sign is unimportant, and the U bit is part of the opcode,
17724 so force the operand type to integer. */
17725 et.type = NT_integer;
17726 neon_mixed_length (et, et.size / 2);
17727 }
17728
17729 static void
17730 do_neon_mul_sat_scalar_long (void)
17731 {
17732 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
17733 }
17734
static void
do_neon_vmull (void)
{
  /* VMULL: long multiply, with integer, polynomial and scalar-indexed
     variants.  */
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Pretend the size is 32 so neon_mixed_length emits size
	     field 0b10 (see comment above).  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
17766
17767 static void
17768 do_neon_ext (void)
17769 {
17770 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
17771 struct neon_type_el et = neon_check_type (3, rs,
17772 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
17773 unsigned imm = (inst.operands[3].imm * et.size) / 8;
17774
17775 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
17776 _("shift out of range"));
17777 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17778 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17779 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
17780 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
17781 inst.instruction |= LOW4 (inst.operands[2].reg);
17782 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
17783 inst.instruction |= neon_quad (rs) << 6;
17784 inst.instruction |= imm << 8;
17785
17786 neon_dp_fixup (&inst);
17787 }
17788
17789 static void
17790 do_neon_rev (void)
17791 {
17792 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17793 struct neon_type_el et = neon_check_type (2, rs,
17794 N_EQK, N_8 | N_16 | N_32 | N_KEY);
17795 unsigned op = (inst.instruction >> 7) & 3;
17796 /* N (width of reversed regions) is encoded as part of the bitmask. We
17797 extract it here to check the elements to be reversed are smaller.
17798 Otherwise we'd get a reserved instruction. */
17799 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
17800 gas_assert (elsize != 0);
17801 constraint (et.size >= elsize,
17802 _("elements must be smaller than reversal region"));
17803 neon_two_same (neon_quad (rs), 1, et.size);
17804 }
17805
static void
do_neon_dup (void)
{
  /* VDUP: replicate either one element of a D register (scalar form) or
     an ARM core register across all lanes of the destination vector.  */
  if (inst.operands[1].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Pre-shift the lane index so it lands above the one-hot size bit.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Size and lane index share the imm4 field starting at bit 16.  */
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
17856
17857 /* VMOV has particularly many variations. It can be one of:
17858 0. VMOV<c><q> <Qd>, <Qm>
17859 1. VMOV<c><q> <Dd>, <Dm>
17860 (Register operations, which are VORR with Rm = Rn.)
17861 2. VMOV<c><q>.<dt> <Qd>, #<imm>
17862 3. VMOV<c><q>.<dt> <Dd>, #<imm>
17863 (Immediate loads.)
17864 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
17865 (ARM register to scalar.)
17866 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
17867 (Two ARM registers to vector.)
17868 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
17869 (Scalar to ARM register.)
17870 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
17871 (Vector to two ARM registers.)
17872 8. VMOV.F32 <Sd>, <Sm>
17873 9. VMOV.F64 <Dd>, <Dm>
17874 (VFP register moves.)
17875 10. VMOV.F32 <Sd>, #imm
17876 11. VMOV.F64 <Dd>, #imm
17877 (VFP float immediate load.)
17878 12. VMOV <Rd>, <Sm>
17879 (VFP single to ARM reg.)
17880 13. VMOV <Sd>, <Rm>
17881 (ARM reg to VFP single.)
17882 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
17883 (Two ARM regs to two VFP singles.)
17884 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
17885 (Two VFP singles to two ARM regs.)
17886
17887 These cases can be disambiguated using neon_select_shape, except cases 1/9
17888 and 3/11 which depend on the operand type too.
17889
17890 All the encoded bits are hardcoded by this function.
17891
17892 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
17893 Cases 5, 7 may be used with VFPv2 and above.
17894
17895 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
17896 can specify a type where it doesn't make sense to, and is ignored). */
17897
static void
do_neon_mov (void)
{
  /* Dispatch on the parsed operand shape; the numbered cases refer to the
     table in the block comment above this function.  */
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
					  NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
					  NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
					  NS_HR, NS_RH, NS_HI, NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* .F64 double move is the VFP fcpyd instruction.  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	/* VORR with Rm == Rn: the source register fills both fields.  */
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Base opc1/opc2 bits selecting the element size; the lane index
	   is OR'd in above the size bits below.  */
	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Size and signedness select the base opc1/opc2 bits; the lane
	   index is OR'd in above the size bits below.  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
    encode_fconstd:
      if (!inst.operands[1].immisfloat)
	{
	  unsigned new_imm;
	  /* Immediate has to fit in 8 bits so float is enough.  */
	  float imm = (float) inst.operands[1].imm;
	  memcpy (&new_imm, &imm, sizeof (float));
	  /* But the assembly may have been written to provide an integer
	     bit pattern that equates to a float, so check that the
	     conversion has worked.  */
	  if (is_quarter_float (new_imm))
	    {
	      if (is_quarter_float (inst.operands[1].imm))
		as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));

	      inst.operands[1].imm = new_imm;
	      inst.operands[1].immisfloat = 1;
	    }
	}

      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
18165
18166 static void
18167 do_neon_rshift_round_imm (void)
18168 {
18169 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
18170 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
18171 int imm = inst.operands[2].imm;
18172
18173 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
18174 if (imm == 0)
18175 {
18176 inst.operands[2].present = 0;
18177 do_neon_mov ();
18178 return;
18179 }
18180
18181 constraint (imm < 1 || (unsigned)imm > et.size,
18182 _("immediate out of range for shift"));
18183 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
18184 et.size - imm);
18185 }
18186
18187 static void
18188 do_neon_movhf (void)
18189 {
18190 enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
18191 constraint (rs != NS_HH, _("invalid suffix"));
18192
18193 if (inst.cond != COND_ALWAYS)
18194 {
18195 if (thumb_mode)
18196 {
18197 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
18198 " the behaviour is UNPREDICTABLE"));
18199 }
18200 else
18201 {
18202 inst.error = BAD_COND;
18203 return;
18204 }
18205 }
18206
18207 do_vfp_sp_monadic ();
18208
18209 inst.is_neon = 1;
18210 inst.instruction |= 0xf0000000;
18211 }
18212
18213 static void
18214 do_neon_movl (void)
18215 {
18216 struct neon_type_el et = neon_check_type (2, NS_QD,
18217 N_EQK | N_DBL, N_SU_32 | N_KEY);
18218 unsigned sizebits = et.size >> 3;
18219 inst.instruction |= sizebits << 19;
18220 neon_two_same (0, et.type == NT_unsigned, -1);
18221 }
18222
18223 static void
18224 do_neon_trn (void)
18225 {
18226 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18227 struct neon_type_el et = neon_check_type (2, rs,
18228 N_EQK, N_8 | N_16 | N_32 | N_KEY);
18229 NEON_ENCODE (INTEGER, inst);
18230 neon_two_same (neon_quad (rs), 1, et.size);
18231 }
18232
18233 static void
18234 do_neon_zip_uzp (void)
18235 {
18236 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18237 struct neon_type_el et = neon_check_type (2, rs,
18238 N_EQK, N_8 | N_16 | N_32 | N_KEY);
18239 if (rs == NS_DD && et.size == 32)
18240 {
18241 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
18242 inst.instruction = N_MNEM_vtrn;
18243 do_neon_trn ();
18244 return;
18245 }
18246 neon_two_same (neon_quad (rs), 1, et.size);
18247 }
18248
18249 static void
18250 do_neon_sat_abs_neg (void)
18251 {
18252 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18253 struct neon_type_el et = neon_check_type (2, rs,
18254 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
18255 neon_two_same (neon_quad (rs), 1, et.size);
18256 }
18257
18258 static void
18259 do_neon_pair_long (void)
18260 {
18261 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18262 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
18263 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
18264 inst.instruction |= (et.type == NT_unsigned) << 7;
18265 neon_two_same (neon_quad (rs), 1, et.size);
18266 }
18267
18268 static void
18269 do_neon_recip_est (void)
18270 {
18271 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18272 struct neon_type_el et = neon_check_type (2, rs,
18273 N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
18274 inst.instruction |= (et.type == NT_float) << 8;
18275 neon_two_same (neon_quad (rs), 1, et.size);
18276 }
18277
18278 static void
18279 do_neon_cls (void)
18280 {
18281 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18282 struct neon_type_el et = neon_check_type (2, rs,
18283 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
18284 neon_two_same (neon_quad (rs), 1, et.size);
18285 }
18286
18287 static void
18288 do_neon_clz (void)
18289 {
18290 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18291 struct neon_type_el et = neon_check_type (2, rs,
18292 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
18293 neon_two_same (neon_quad (rs), 1, et.size);
18294 }
18295
18296 static void
18297 do_neon_cnt (void)
18298 {
18299 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18300 struct neon_type_el et = neon_check_type (2, rs,
18301 N_EQK | N_INT, N_8 | N_KEY);
18302 neon_two_same (neon_quad (rs), 1, et.size);
18303 }
18304
18305 static void
18306 do_neon_swp (void)
18307 {
18308 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18309 neon_two_same (neon_quad (rs), 1, -1);
18310 }
18311
18312 static void
18313 do_neon_tbl_tbx (void)
18314 {
18315 unsigned listlenbits;
18316 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
18317
18318 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
18319 {
18320 first_error (_("bad list length for table lookup"));
18321 return;
18322 }
18323
18324 listlenbits = inst.operands[1].imm - 1;
18325 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
18326 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
18327 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
18328 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
18329 inst.instruction |= LOW4 (inst.operands[2].reg);
18330 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
18331 inst.instruction |= listlenbits << 8;
18332
18333 neon_dp_fixup (&inst);
18334 }
18335
static void
do_neon_ldm_stm (void)
{
  /* VLDM/VSTM for double-precision register lists.  Single-precision
     lists are delegated to the VFP encoder.  */
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register occupies two words in the transfer.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
18365
static void
do_neon_ldr_str (void)
{
  /* VLDR/VSTR: delegate to the VFP single- or double-precision load/store
     encoder, with an ARMv8.2 FP16 variant for 16-bit singles.  */
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
	inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
	as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");

      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
      if (inst.vectype.el[0].size == 16)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
18402
static void
do_t_vldr_vstr_sysreg (void)
{
  /* VLDR/VSTR (System Register), Thumb encoding.  Operand 0 is the system
     register number, operand 1 the memory addressing form.  */
  int fp_vldr_bitno = 20, sysreg_vldr_bitno = 20;
  bfd_boolean is_vldr = ((inst.instruction & (1 << fp_vldr_bitno)) != 0);

  /* Use of PC is UNPREDICTABLE.  */
  if (inst.operands[1].reg == REG_PC)
    inst.error = _("Use of PC here is UNPREDICTABLE");

  if (inst.operands[1].immisreg)
    inst.error = _("instruction does not accept register index");

  if (!inst.operands[1].isreg)
    inst.error = _("instruction does not accept PC-relative addressing");

  /* Offset immediate must fit in 7 bits (before the implicit scaling
     applied by encode_arm_cp_address).  */
  if (abs (inst.operands[1].imm) >= (1 << 7))
    inst.error = _("immediate value out of range");

  inst.instruction = 0xec000f80;
  if (is_vldr)
    inst.instruction |= 1 << sysreg_vldr_bitno;
  encode_arm_cp_address (1, TRUE, FALSE, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM);
  /* System register number: low three bits in [15:13], bit 3 in bit 22.  */
  inst.instruction |= (inst.operands[0].imm & 0x7) << 13;
  inst.instruction |= (inst.operands[0].imm & 0x8) << 19;
}
18429
18430 static void
18431 do_vldr_vstr (void)
18432 {
18433 bfd_boolean sysreg_op = !inst.operands[0].isreg;
18434
18435 /* VLDR/VSTR (System Register). */
18436 if (sysreg_op)
18437 {
18438 if (!mark_feature_used (&arm_ext_v8_1m_main))
18439 as_bad (_("Instruction not permitted on this architecture"));
18440
18441 do_t_vldr_vstr_sysreg ();
18442 }
18443 /* VLDR/VSTR. */
18444 else
18445 {
18446 if (!mark_feature_used (&fpu_vfp_ext_v1xd))
18447 as_bad (_("Instruction not permitted on this architecture"));
18448 do_neon_ldr_str ();
18449 }
18450 }
18451
/* "interleave" version also handles non-interleaving register VLD1/VST1
   instructions.  */

static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Encode the alignment hint (given in bits, e.g. "@64") into the
     two-bit align field, checking that the list length permits it.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      BAD_EL_TYPE);

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
18520
18521 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
18522 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
18523 otherwise. The variable arguments are a list of pairs of legal (size, align)
18524 values, terminated with -1. */
18525
18526 static int
18527 neon_alignment_bit (int size, int align, int *do_alignment, ...)
18528 {
18529 va_list ap;
18530 int result = FAIL, thissize, thisalign;
18531
18532 if (!inst.operands[1].immisalign)
18533 {
18534 *do_alignment = 0;
18535 return SUCCESS;
18536 }
18537
18538 va_start (ap, do_alignment);
18539
18540 do
18541 {
18542 thissize = va_arg (ap, int);
18543 if (thissize == -1)
18544 break;
18545 thisalign = va_arg (ap, int);
18546
18547 if (size == thissize && align == thisalign)
18548 result = SUCCESS;
18549 }
18550 while (result != SUCCESS);
18551
18552 va_end (ap);
18553
18554 if (result == SUCCESS)
18555 *do_alignment = 1;
18556 else
18557 first_error (_("unsupported alignment for instruction"));
18558
18559 return result;
18560 }
18561
/* Encode a single-lane VLD<n>/VST<n> ("one lane" form) instruction.  <n>
   minus one is taken from bits [9:8] of the initial bitmask in
   inst.instruction.  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  /* Alignment (in bits) from the address operand; 0 when none given.  */
  int align = inst.operands[1].imm >> 8;
  /* <n> minus one: 0 => VLD1/VST1 ... 3 => VLD4/VST4.  */
  int n = (inst.instruction >> 8) & 3;
  /* Number of elements of this size in a 64-bit D register, i.e. one past
     the highest valid lane index.  */
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  /* The register list must name exactly <n> registers.  */
  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Each <n> permits a different set of (size, alignment) pairs, and packs
     the alignment into the index field (bits [7:4]) differently.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
				       16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    /* 32-bit elements distinguish :64 from :128 alignment here.  */
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  /* The lane number occupies the index field above the alignment bits.  */
  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
18646
/* Encode single n-element structure to all lanes VLD<n> instructions.  */

static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  /* Bits [9:8] of the initial bitmask hold <n> minus one.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* A two-register list sets the T bit (bit 5).  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      /* Bit 5 encodes a register stride of 2.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with :128 alignment use the otherwise-unused
	   size encoding 0b11.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* The "a" (alignment) bit.  */
  inst.instruction |= do_alignment << 4;
}
18721
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */

static void
do_neon_ldx_stx (void)
{
  if (inst.operands[1].isreg)
    constraint (inst.operands[1].reg == REG_PC, BAD_PC);

  /* Dispatch on the lane specification in the register list.  */
  switch (NEON_LANE (inst.operands[0].imm))
    {
    case NEON_INTERLEAVE_LANES:
      /* No lane given: multiple-structures form.  */
      NEON_ENCODE (INTERLV, inst);
      do_neon_ld_st_interleave ();
      break;

    case NEON_ALL_LANES:
      /* "[]" given: load-to-all-lanes form; there is no store variant, in
	 which case the DUP encoding is N_INV.  */
      NEON_ENCODE (DUP, inst);
      if (inst.instruction == N_INV)
	{
	  first_error ("only loads support such operands");
	  break;
	}
      do_neon_ld_dup ();
      break;

    default:
      /* A specific lane index: single-lane form.  */
      NEON_ENCODE (LANE, inst);
      do_neon_ld_st_lane ();
    }

  /* L bit comes from bit mask.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;

  /* The Rm field encodes the addressing mode: 0xd means writeback, 0xf
     means no writeback, anything else is a post-index register (which is
     why those two registers cannot be used for post-indexing).  */
  if (inst.operands[1].postind)
    {
      int postreg = inst.operands[1].imm & 0xf;
      constraint (!inst.operands[1].immisreg,
		  _("post-index must be a register"));
      constraint (postreg == 0xd || postreg == 0xf,
		  _("bad register for post-index"));
      inst.instruction |= postreg;
    }
  else
    {
      constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  BAD_ADDR_MODE);

      if (inst.operands[1].writeback)
	{
	  inst.instruction |= 0xd;
	}
      else
	inst.instruction |= 0xf;
    }

  if (thumb_mode)
    inst.instruction |= 0xf9000000;
  else
    inst.instruction |= 0xf4000000;
}
18787
18788 /* FP v8. */
18789 static void
18790 do_vfp_nsyn_fpv8 (enum neon_shape rs)
18791 {
18792 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
18793 D register operands. */
18794 if (neon_shape_class[rs] == SC_DOUBLE)
18795 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
18796 _(BAD_FPU));
18797
18798 NEON_ENCODE (FPV8, inst);
18799
18800 if (rs == NS_FFF || rs == NS_HHH)
18801 {
18802 do_vfp_sp_dyadic ();
18803
18804 /* ARMv8.2 fp16 instruction. */
18805 if (rs == NS_HHH)
18806 do_scalar_fp16_v82_encode ();
18807 }
18808 else
18809 do_vfp_dp_rd_rn_rm ();
18810
18811 if (rs == NS_DDD)
18812 inst.instruction |= 0x100;
18813
18814 inst.instruction |= 0xf0000000;
18815 }
18816
18817 static void
18818 do_vsel (void)
18819 {
18820 set_pred_insn_type (OUTSIDE_PRED_INSN);
18821
18822 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
18823 first_error (_("invalid instruction shape"));
18824 }
18825
18826 static void
18827 do_vmaxnm (void)
18828 {
18829 set_pred_insn_type (OUTSIDE_PRED_INSN);
18830
18831 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
18832 return;
18833
18834 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
18835 return;
18836
18837 neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
18838 }
18839
/* Common worker for the VRINT family.  MODE selects the rounding
   behaviour.  The VFP encoding is tried first; if the type check fails,
   the Advanced SIMD (Neon) encoding is used instead.  */

static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The a/n/p/m modes carry the rounding mode in the encoding itself
	 and only exist as unconditional instructions.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_pred_insn_type (OUTSIDE_PRED_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      /* Select the encoding bits for the requested rounding behaviour;
	 the 0xf variants have 0xf in the condition field.  */
      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Bit 8 (sz) selects double precision.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_pred_insn_type (OUTSIDE_PRED_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* Rounding mode goes in bits [9:7]; VRINTR has no Neon form.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
18931
static void
do_vrintx (void)
{
  /* VRINTX: round using the FPSCR rounding mode, signalling inexactness.  */
  do_vrint_1 (neon_cvt_mode_x);
}
18937
static void
do_vrintz (void)
{
  /* VRINTZ: round towards zero.  */
  do_vrint_1 (neon_cvt_mode_z);
}
18943
static void
do_vrintr (void)
{
  /* VRINTR: round using the FPSCR rounding mode.  */
  do_vrint_1 (neon_cvt_mode_r);
}
18949
static void
do_vrinta (void)
{
  /* VRINTA: round to nearest, ties away from zero.  */
  do_vrint_1 (neon_cvt_mode_a);
}
18955
static void
do_vrintn (void)
{
  /* VRINTN: round to nearest, ties to even.  */
  do_vrint_1 (neon_cvt_mode_n);
}
18961
static void
do_vrintp (void)
{
  /* VRINTP: round towards plus infinity.  */
  do_vrint_1 (neon_cvt_mode_p);
}
18967
static void
do_vrintm (void)
{
  /* VRINTM: round towards minus infinity.  */
  do_vrint_1 (neon_cvt_mode_m);
}
18973
/* Return the encoded register/index value for a VCMLA scalar operand OPND
   of element size ELSIZE, reporting an error (and returning 0) when it is
   out of range.  */
static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (opnd);
  unsigned idx = NEON_SCALAR_INDEX (opnd);

  if (elsize == 16)
    {
      /* fp16 scalars: registers 0-15, index 0 or 1, index in bit 4.  */
      if (idx < 2 && reg < 16)
	return reg | (idx << 4);
    }
  else if (elsize == 32)
    {
      /* fp32 scalars: only index 0 is representable.  */
      if (idx == 0)
	return reg;
    }

  first_error (_("scalar out of range"));
  return 0;
}
18988
/* VCMLA: floating-point complex multiply-accumulate with rotation.  The
   rotation must be 0, 90, 180 or 270 degrees and is encoded as rot/90.
   Operand 2 is either a vector register or an indexed scalar.  */

static void
do_vcmla (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));
  rot /= 90;
  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form.  */
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Rotation in bits [21:20]; bit 23 selects fp32 elements.  */
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      /* Vector form.  */
      enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc200800;
      /* Rotation in bits [24:23]; bit 20 selects fp32 elements.  */
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
19030
/* VCADD: floating-point complex add with rotation.  The rotation must be
   90 or 270 degrees.  */

static void
do_vcadd (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				   N_KEY | N_F16 | N_F32).size;
  neon_three_same (neon_quad (rs), 0, -1);
  inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
  inst.instruction |= 0xfc800800;
  /* Bit 24 set means a rotation of 270; bit 20 selects fp32 elements.  */
  inst.instruction |= (rot == 270) << 24;
  inst.instruction |= (size == 32) << 20;
}
19049
19050 /* Dot Product instructions encoding support. */
19051
/* Common worker for the dot product instructions.  UNSIGNED_P selects the
   unsigned (VUDOT) rather than signed (VSDOT) variant.  */

static void
do_neon_dotproduct (int unsigned_p)
{
  enum neon_shape rs;
  unsigned scalar_oprd2 = 0;
  int high8;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
	       "is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* Dot Product instructions are in three-same D/Q register format or the third
     operand can be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
      high8 = 0xfe000000;
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
    }

  if (unsigned_p)
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
  else
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);

  /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
     Product instruction, so we pass 0 as the "ubit" parameter.  And the
     "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter.  */
  neon_three_same (neon_quad (rs), 0, 32);

  /* Undo neon_dp_fixup.  Dot Product instructions are using a slightly
     different NEON three-same encoding.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;
  /* Encode 'U' bit which indicates signedness.  */
  inst.instruction |= (unsigned_p ? 1 : 0) << 4;
  /* Re-encode operand2 if it's indexed scalar operand.  What has been encoded
     from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
     the instruction encoding.  */
  if (inst.operands[2].isscalar)
    {
      inst.instruction &= 0xffffffd0;
      inst.instruction |= LOW4 (scalar_oprd2);
      inst.instruction |= HI1 (scalar_oprd2) << 5;
    }
}
19106
/* Dot Product instructions for signed integer.  */

static void
do_neon_dotproduct_s (void)
{
  /* Use a plain call rather than "return <void expression>;" — ISO C
     (C11 6.8.6.4p1) forbids a return with an expression in a function
     returning void; compilers only accept it as an extension.  */
  do_neon_dotproduct (0);
}
19114
/* Dot Product instructions for unsigned integer.  */

static void
do_neon_dotproduct_u (void)
{
  /* Use a plain call rather than "return <void expression>;" — ISO C
     (C11 6.8.6.4p1) forbids a return with an expression in a function
     returning void; compilers only accept it as an extension.  */
  do_neon_dotproduct (1);
}
19122
19123 /* Crypto v1 instructions. */
19124 static void
19125 do_crypto_2op_1 (unsigned elttype, int op)
19126 {
19127 set_pred_insn_type (OUTSIDE_PRED_INSN);
19128
19129 if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
19130 == NT_invtype)
19131 return;
19132
19133 inst.error = NULL;
19134
19135 NEON_ENCODE (INTEGER, inst);
19136 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
19137 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
19138 inst.instruction |= LOW4 (inst.operands[1].reg);
19139 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
19140 if (op != -1)
19141 inst.instruction |= op << 6;
19142
19143 if (thumb_mode)
19144 inst.instruction |= 0xfc000000;
19145 else
19146 inst.instruction |= 0xf0000000;
19147 }
19148
19149 static void
19150 do_crypto_3op_1 (int u, int op)
19151 {
19152 set_pred_insn_type (OUTSIDE_PRED_INSN);
19153
19154 if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
19155 N_32 | N_UNT | N_KEY).type == NT_invtype)
19156 return;
19157
19158 inst.error = NULL;
19159
19160 NEON_ENCODE (INTEGER, inst);
19161 neon_three_same (1, u, 8 << op);
19162 }
19163
static void
do_aese (void)
{
  /* AESE: two-operand crypto insn, 8-bit elements, op field 0.  */
  do_crypto_2op_1 (N_8, 0);
}
19169
static void
do_aesd (void)
{
  /* AESD: two-operand crypto insn, 8-bit elements, op field 1.  */
  do_crypto_2op_1 (N_8, 1);
}
19175
static void
do_aesmc (void)
{
  /* AESMC: two-operand crypto insn, 8-bit elements, op field 2.  */
  do_crypto_2op_1 (N_8, 2);
}
19181
static void
do_aesimc (void)
{
  /* AESIMC: two-operand crypto insn, 8-bit elements, op field 3.  */
  do_crypto_2op_1 (N_8, 3);
}
19187
static void
do_sha1c (void)
{
  /* SHA1C: three-operand crypto insn, u == 0, op == 0.  */
  do_crypto_3op_1 (0, 0);
}
19193
static void
do_sha1p (void)
{
  /* SHA1P: three-operand crypto insn, u == 0, op == 1.  */
  do_crypto_3op_1 (0, 1);
}
19199
static void
do_sha1m (void)
{
  /* SHA1M: three-operand crypto insn, u == 0, op == 2.  */
  do_crypto_3op_1 (0, 2);
}
19205
static void
do_sha1su0 (void)
{
  /* SHA1SU0: three-operand crypto insn, u == 0, op == 3.  */
  do_crypto_3op_1 (0, 3);
}
19211
static void
do_sha256h (void)
{
  /* SHA256H: three-operand crypto insn, u == 1, op == 0.  */
  do_crypto_3op_1 (1, 0);
}
19217
static void
do_sha256h2 (void)
{
  /* SHA256H2: three-operand crypto insn, u == 1, op == 1.  */
  do_crypto_3op_1 (1, 1);
}
19223
static void
do_sha256su1 (void)
{
  /* SHA256SU1: three-operand crypto insn, u == 1, op == 2.  */
  do_crypto_3op_1 (1, 2);
}
19229
static void
do_sha1h (void)
{
  /* SHA1H: two-operand crypto insn, 32-bit elements, no op field.  */
  do_crypto_2op_1 (N_32, -1);
}
19235
static void
do_sha1su1 (void)
{
  /* SHA1SU1: two-operand crypto insn, 32-bit elements, op field 0.  */
  do_crypto_2op_1 (N_32, 0);
}
19241
static void
do_sha256su0 (void)
{
  /* SHA256SU0: two-operand crypto insn, 32-bit elements, op field 1.  */
  do_crypto_2op_1 (N_32, 1);
}
19247
19248 static void
19249 do_crc32_1 (unsigned int poly, unsigned int sz)
19250 {
19251 unsigned int Rd = inst.operands[0].reg;
19252 unsigned int Rn = inst.operands[1].reg;
19253 unsigned int Rm = inst.operands[2].reg;
19254
19255 set_pred_insn_type (OUTSIDE_PRED_INSN);
19256 inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
19257 inst.instruction |= LOW4 (Rn) << 16;
19258 inst.instruction |= LOW4 (Rm);
19259 inst.instruction |= sz << (thumb_mode ? 4 : 21);
19260 inst.instruction |= poly << (thumb_mode ? 20 : 9);
19261
19262 if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
19263 as_warn (UNPRED_REG ("r15"));
19264 }
19265
static void
do_crc32b (void)
{
  /* CRC32B: CRC-32 polynomial, byte operand.  */
  do_crc32_1 (0, 0);
}
19271
static void
do_crc32h (void)
{
  /* CRC32H: CRC-32 polynomial, halfword operand.  */
  do_crc32_1 (0, 1);
}
19277
static void
do_crc32w (void)
{
  /* CRC32W: CRC-32 polynomial, word operand.  */
  do_crc32_1 (0, 2);
}
19283
static void
do_crc32cb (void)
{
  /* CRC32CB: CRC-32C polynomial, byte operand.  */
  do_crc32_1 (1, 0);
}
19289
static void
do_crc32ch (void)
{
  /* CRC32CH: CRC-32C polynomial, halfword operand.  */
  do_crc32_1 (1, 1);
}
19295
static void
do_crc32cw (void)
{
  /* CRC32CW: CRC-32C polynomial, word operand.  */
  do_crc32_1 (1, 2);
}
19301
static void
do_vjcvt (void)
{
  /* VJCVT: Javascript-style double-precision to signed-word conversion;
     requires the ARMv8 VFP extension.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
19311
19312 \f
19313 /* Overall per-instruction processing. */
19314
19315 /* We need to be able to fix up arbitrary expressions in some statements.
19316 This is so that we can handle symbols that are an arbitrary distance from
19317 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
19318 which returns part of an address in a form which will be valid for
19319 a data instruction. We do this by pushing the expression into a symbol
19320 in the expr_section, and creating a fix for that. */
19321
/* FRAG and WHERE locate the fixup, SIZE is its width in bytes, EXP the
   expression being fixed up, PC_REL whether it is pc-relative, and RELOC
   the relocation type to emit.  */
static void
fix_new_arm (fragS * frag,
	     int where,
	     short int size,
	     expressionS * exp,
	     int pc_rel,
	     int reloc)
{
  fixS * new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      /* Simple enough to fix up directly.  */
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex is wrapped in an expression symbol.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
19375
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Reduce the target expression to the symbol/offset pair the relaxation
     machinery tracks.  */
  switch (inst.relocs[0].exp.X_op)
    {
    case O_symbol:
      sym = inst.relocs[0].exp.X_add_symbol;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.relocs[0].exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
19407
19408 /* Write a 32-bit thumb instruction to buf. */
19409 static void
19410 put_thumb32_insn (char * buf, unsigned long insn)
19411 {
19412 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
19413 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
19414 }
19415
/* Write the assembled instruction held in inst out to the current frag,
   creating fixups for any relocations it needs.  STR is the source line,
   used only for error reporting.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  /* Instructions of unknown final size go through the relaxation
     machinery instead.  */
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  /* Nothing to emit.  */
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb: two halfwords, high halfword first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Double-word output: the same word is written twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  /* Emit a fixup for every pending relocation.  */
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    {
      if (inst.relocs[r].type != BFD_RELOC_UNUSED)
	fix_new_arm (frag_now, to - frag_now->fr_literal,
		     inst.size, & inst.relocs[r].exp, inst.relocs[r].pc_rel,
		     inst.relocs[r].type);
    }

  dwarf2_emit_insn (inst.size);
}
19466
19467 static char *
19468 output_it_inst (int cond, int mask, char * to)
19469 {
19470 unsigned long instruction = 0xbf00;
19471
19472 mask &= 0xf;
19473 instruction |= mask;
19474 instruction |= cond << 4;
19475
19476 if (to == NULL)
19477 {
19478 to = frag_more (2);
19479 #ifdef OBJ_ELF
19480 dwarf2_emit_insn (2);
19481 #endif
19482 }
19483
19484 md_number_to_chars (to, instruction, 2);
19485
19486 return to;
19487 }
19488
/* Tag values used in struct asm_opcode's tag field.  They describe where
   (if anywhere) a conditional affix may appear in the mnemonic.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a scalar
			   conditional suffix, others place 0xF where the
			   condition field would be, others take a vector
			   conditional suffix.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
19523
19524 /* Subroutine of md_assemble, responsible for looking up the primary
19525 opcode from the mnemonic the user wrote. STR points to the
19526 beginning of the mnemonic.
19527
19528 This is not simply a hash table lookup, because of conditional
19529 variants. Most instructions have conditional variants, which are
19530 expressed with a _conditional affix_ to the mnemonic. If we were
19531 to encode each conditional variant as a literal string in the opcode
19532 table, it would have approximately 20,000 entries.
19533
19534 Most mnemonics take this affix as a suffix, and in unified syntax,
19535 'most' is upgraded to 'all'. However, in the divided syntax, some
19536 instructions take the affix as an infix, notably the s-variants of
19537 the arithmetic instructions. Of those instructions, all but six
19538 have the infix appear after the third character of the mnemonic.
19539
19540 Accordingly, the algorithm for looking up primary opcodes given
19541 an identifier is:
19542
19543 1. Look up the identifier in the opcode table.
19544 If we find a match, go to step U.
19545
19546 2. Look up the last two characters of the identifier in the
19547 conditions table. If we find a match, look up the first N-2
19548 characters of the identifier in the opcode table. If we
19549 find a match, go to step CE.
19550
19551 3. Look up the fourth and fifth characters of the identifier in
19552 the conditions table. If we find a match, extract those
19553 characters from the identifier, and look up the remaining
19554 characters in the opcode table. If we find a match, go
19555 to step CM.
19556
19557 4. Fail.
19558
19559 U. Examine the tag field of the opcode structure, in case this is
19560 one of the six instructions with its conditional infix in an
19561 unusual place. If it is, the tag tells us where to find the
19562 infix; look it up in the conditions table and set inst.cond
19563 accordingly. Otherwise, this is an unconditional instruction.
19564 Again set inst.cond accordingly. Return the opcode structure.
19565
19566 CE. Examine the tag field to make sure this is an instruction that
19567 should receive a conditional suffix. If it is not, fail.
19568 Otherwise, set inst.cond from the suffix we already looked up,
19569 and return the opcode structure.
19570
19571 CM. Examine the tag field to make sure this is an instruction that
19572 should receive a conditional infix after the third character.
19573 If it is not, fail. Otherwise, undo the edits to the current
19574 line of input and proceed as for case CE. */
19575
/* Look up the mnemonic at *STR in the opcode hash tables, following the
   algorithm described in the comment above (steps U, CE and CM).  On
   success, returns the opcode entry, sets inst.cond (and possibly
   inst.size_req / inst.vectype from any width or Neon type suffix) and
   advances *STR past the mnemonic.  Returns NULL on failure.  */

static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      /* Skip over ".w"/".n" if one was consumed; otherwise leave *STR at
	 the '.' so a Neon type suffix can be parsed from it.  */
      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      /* One of the six instructions with the condition infixed in an
	 unusual place; the tag encodes the infix position.  */
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }
  /* MVE: try a one-character vector-predication suffix ('t' or 'e') on the
     last character of the mnemonic.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* Cannot have a one-character vector-predication suffix on a mnemonic
	 of fewer than two characters.  */
      if (end - base < 2)
	return NULL;
      affix = end - 1;
      cond = (const struct asm_cond *) hash_find_n (arm_vcond_hsh, affix, 1);
      opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
							affix - base);
      /* If this opcode can not be vector predicated then don't accept it with a
	 vector predication code.  */
      if (opcode && !opcode->mayBeVecPred)
	opcode = NULL;
    }
  /* NOTE: COND is only examined here when OPCODE is non-NULL, which can only
     happen via the MVE lookup above where COND was assigned.  */
  if (!opcode || !cond)
    {
      /* Cannot have a conditional suffix on a mnemonic of less than two
	 characters.  */
      if (end - base < 3)
	return NULL;

      /* Look for suffixed mnemonic.  */
      affix = end - 2;
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
							affix - base);
    }

  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return NULL;
	  /* Fall through.  */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily excise the two infix characters, look up the remainder,
     then restore the input line exactly as it was.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
19750
19751 /* This function generates an initial IT instruction, leaving its block
19752 virtually open for the new instructions. Eventually,
19753 the mask will be updated by now_pred_add_mask () each time
19754 a new instruction needs to be included in the IT block.
19755 Finally, the block is closed with close_automatic_it_block ().
19756 The block closure can be requested either from md_assemble (),
19757 a tencode (), or due to a label hook. */
19758
19759 static void
19760 new_automatic_it_block (int cond)
19761 {
19762 now_pred.state = AUTOMATIC_PRED_BLOCK;
19763 now_pred.mask = 0x18;
19764 now_pred.cc = cond;
19765 now_pred.block_length = 1;
19766 mapping_state (MAP_THUMB);
19767 now_pred.insn = output_it_inst (cond, now_pred.mask, NULL);
19768 now_pred.warn_deprecated = FALSE;
19769 now_pred.insn_cond = TRUE;
19770 }
19771
19772 /* Close an automatic IT block.
19773 See comments in new_automatic_it_block (). */
19774
19775 static void
19776 close_automatic_it_block (void)
19777 {
19778 now_pred.mask = 0x10;
19779 now_pred.block_length = 0;
19780 }
19781
19782 /* Update the mask of the current automatically-generated IT
19783 instruction. See comments in new_automatic_it_block (). */
19784
19785 static void
19786 now_pred_add_mask (int cond)
19787 {
19788 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
19789 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
19790 | ((bitvalue) << (nbit)))
19791 const int resulting_bit = (cond & 1);
19792
19793 now_pred.mask &= 0xf;
19794 now_pred.mask = SET_BIT_VALUE (now_pred.mask,
19795 resulting_bit,
19796 (5 - now_pred.block_length));
19797 now_pred.mask = SET_BIT_VALUE (now_pred.mask,
19798 1,
19799 ((5 - now_pred.block_length) - 1));
19800 output_it_inst (now_pred.cc, now_pred.mask, now_pred.insn);
19801
19802 #undef CLEAR_BIT
19803 #undef SET_BIT_VALUE
19804 }
19805
19806 /* The IT blocks handling machinery is accessed through the these functions:
19807 it_fsm_pre_encode () from md_assemble ()
19808 set_pred_insn_type () optional, from the tencode functions
19809 set_pred_insn_type_last () ditto
19810 in_pred_block () ditto
19811 it_fsm_post_encode () from md_assemble ()
19812 force_automatic_it_block_close () from label handling functions
19813
19814 Rationale:
19815 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
19816 initializing the IT insn type with a generic initial value depending
19817 on the inst.condition.
19818 2) During the tencode function, two things may happen:
19819 a) The tencode function overrides the IT insn type by
19820 calling either set_pred_insn_type (type) or
19821 set_pred_insn_type_last ().
19822 b) The tencode function queries the IT block state by
19823 calling in_pred_block () (i.e. to determine narrow/not narrow mode).
19824
19825 Both set_pred_insn_type and in_pred_block run the internal FSM state
19826 handling function (handle_pred_state), because: a) setting the IT insn
19827 type may incur in an invalid state (exiting the function),
19828 and b) querying the state requires the FSM to be updated.
19829 Specifically we want to avoid creating an IT block for conditional
19830 branches, so it_fsm_pre_encode is actually a guess and we can't
19831 determine whether an IT block is required until the tencode () routine
19832 has decided what type of instruction this actually it.
19833 Because of this, if set_pred_insn_type and in_pred_block have to be
19834 used, set_pred_insn_type has to be called first.
19835
19836 set_pred_insn_type_last () is a wrapper of set_pred_insn_type (type),
19837 that determines the insn IT type depending on the inst.cond code.
19838 When a tencode () routine encodes an instruction that can be
19839 either outside an IT block, or, in the case of being inside, has to be
19840 the last one, set_pred_insn_type_last () will determine the proper
19841 IT instruction type based on the inst.cond code. Otherwise,
19842 set_pred_insn_type can be called for overriding that logic or
19843 for covering other cases.
19844
19845 Calling handle_pred_state () may not transition the IT block state to
19846 OUTSIDE_PRED_BLOCK immediately, since the (current) state could be
19847 still queried. Instead, if the FSM determines that the state should
19848 be transitioned to OUTSIDE_PRED_BLOCK, a flag is marked to be closed
19849 after the tencode () function: that's what it_fsm_post_encode () does.
19850
19851 Since in_pred_block () calls the state handling function to get an
19852 updated state, an error may occur (due to invalid insns combination).
19853 In that case, inst.error is set.
19854 Therefore, inst.error has to be checked after the execution of
19855 the tencode () routine.
19856
19857 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
19858 any pending state change (if any) that didn't take place in
19859 handle_pred_state () as explained above. */
19860
19861 static void
19862 it_fsm_pre_encode (void)
19863 {
19864 if (inst.cond != COND_ALWAYS)
19865 inst.pred_insn_type = INSIDE_IT_INSN;
19866 else
19867 inst.pred_insn_type = OUTSIDE_PRED_INSN;
19868
19869 now_pred.state_handled = 0;
19870 }
19871
19872 /* IT state FSM handling function. */
19873 /* MVE instructions and non-MVE instructions are handled differently because of
19874 the introduction of VPT blocks.
19875 Specifications say that any non-MVE instruction inside a VPT block is
19876 UNPREDICTABLE, with the exception of the BKPT instruction. Whereas most MVE
19877 instructions are deemed to be UNPREDICTABLE if inside an IT block. For the
19878 few exceptions we have MVE_UNPREDICABLE_INSN.
19879 The error messages provided depending on the different combinations possible
19880 are described in the cases below:
19881 For 'most' MVE instructions:
19882 1) In an IT block, with an IT code: syntax error
19883 2) In an IT block, with a VPT code: error: must be in a VPT block
19884 3) In an IT block, with no code: warning: UNPREDICTABLE
19885 4) In a VPT block, with an IT code: syntax error
19886 5) In a VPT block, with a VPT code: OK!
19887 6) In a VPT block, with no code: error: missing code
19888 7) Outside a pred block, with an IT code: error: syntax error
19889 8) Outside a pred block, with a VPT code: error: should be in a VPT block
19890 9) Outside a pred block, with no code: OK!
19891 For non-MVE instructions:
19892 10) In an IT block, with an IT code: OK!
19893 11) In an IT block, with a VPT code: syntax error
19894 12) In an IT block, with no code: error: missing code
19895 13) In a VPT block, with an IT code: error: should be in an IT block
19896 14) In a VPT block, with a VPT code: syntax error
19897 15) In a VPT block, with no code: UNPREDICTABLE
19898 16) Outside a pred block, with an IT code: error: should be in an IT block
19899 17) Outside a pred block, with a VPT code: syntax error
19900 18) Outside a pred block, with no code: OK!
19901 */
19902
19903
/* Run the IT/VPT predication FSM for the instruction just parsed.
   Returns SUCCESS or FAIL; on FAIL inst.error is set.  The numbered
   cases in the comments below refer to the table in the comment above
   this function.  */
static int
handle_pred_state (void)
{
  now_pred.state_handled = 1;
  now_pred.insn_cond = FALSE;

  switch (now_pred.state)
    {
    case OUTSIDE_PRED_BLOCK:
      switch (inst.pred_insn_type)
	{
	case MVE_UNPREDICABLE_INSN:
	case MVE_OUTSIDE_PRED_INSN:
	  if (inst.cond < COND_ALWAYS)
	    {
	      /* Case 7: Outside a pred block, with an IT code: error: syntax
		 error.  */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  /* Case 9:  Outside a pred block, with no code: OK! */
	  break;
	case OUTSIDE_PRED_INSN:
	  if (inst.cond > COND_ALWAYS)
	    {
	      /* Case 17:  Outside a pred block, with a VPT code: syntax error.
	       */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  /* Case 18: Outside a pred block, with no code: OK! */
	  break;

	case INSIDE_VPT_INSN:
	  /* Case 8: Outside a pred block, with a VPT code: error: should be in
	     a VPT block.  */
	  inst.error = BAD_OUT_VPT;
	  return FAIL;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (inst.cond < COND_ALWAYS)
	    {
	      /* Case 16: Outside a pred block, with an IT code: error: should
		 be in an IT block.  */
	      if (thumb_mode == 0)
		{
		  if (unified_syntax
		      && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		    as_tsktsk (_("Warning: conditional outside an IT block"\
				 " for Thumb."));
		}
	      else
		{
		  if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      /* Automatically generate the IT instruction.  */
		      new_automatic_it_block (inst.cond);
		      if (inst.pred_insn_type == INSIDE_IT_LAST_INSN)
			close_automatic_it_block ();
		    }
		  else
		    {
		      inst.error = BAD_OUT_IT;
		      return FAIL;
		    }
		}
	      break;
	    }
	  else if (inst.cond > COND_ALWAYS)
	    {
	      /* Case 17: Outside a pred block, with a VPT code: syntax error.
	       */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  else
	    gas_assert (0);
	  /* NOTE(review): no break here -- this relies on gas_assert (0)
	     not returning; the apparent fall-through into the next case
	     should be unreachable.  */
	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case VPT_INSN:
	  if (inst.cond != COND_ALWAYS)
	    first_error (BAD_SYNTAX);
	  /* A VPT/VPST instruction opens a manually-managed VPT block.  */
	  now_pred.state = MANUAL_PRED_BLOCK;
	  now_pred.block_length = 0;
	  now_pred.type = VECTOR_PRED;
	  now_pred.cc = 0;
	  break;
	case IT_INSN:
	  /* An IT instruction opens a manually-managed IT block.  */
	  now_pred.state = MANUAL_PRED_BLOCK;
	  now_pred.block_length = 0;
	  now_pred.type = SCALAR_PRED;
	  break;
	}
      break;

    case AUTOMATIC_PRED_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.pred_insn_type)
	{
	case INSIDE_VPT_INSN:
	case VPT_INSN:
	case MVE_UNPREDICABLE_INSN:
	case MVE_OUTSIDE_PRED_INSN:
	  gas_assert (0);
	  /* NOTE(review): relies on gas_assert (0) not returning; no
	     fall-through into OUTSIDE_PRED_INSN should occur in practice.  */
	case OUTSIDE_PRED_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_pred_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_pred.block_length++;

	  if (now_pred.block_length > 4
	      || !now_pred_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.pred_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_pred.insn_cond = TRUE;
	      now_pred_add_mask (inst.cond);
	    }

	  if (now_pred.state == AUTOMATIC_PRED_BLOCK
	      && (inst.pred_insn_type == INSIDE_IT_LAST_INSN
		  || inst.pred_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_pred.block_length++;
	  now_pred.insn_cond = TRUE;

	  if (now_pred.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_pred_add_mask (now_pred.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_pred.state = MANUAL_PRED_BLOCK;
	  break;
	}
      break;

    case MANUAL_PRED_BLOCK:
      {
	int cond, is_last;
	if (now_pred.type == SCALAR_PRED)
	  {
	    /* Check conditional suffixes.  */
	    cond = now_pred.cc ^ ((now_pred.mask >> 4) & 1) ^ 1;
	    now_pred.mask <<= 1;
	    now_pred.mask &= 0x1f;
	    is_last = (now_pred.mask == 0x10);
	  }
	else
	  {
	    now_pred.cc ^= (now_pred.mask >> 4);
	    cond = now_pred.cc + 0xf;
	    now_pred.mask <<= 1;
	    now_pred.mask &= 0x1f;
	    is_last = now_pred.mask == 0x10;
	  }
	now_pred.insn_cond = TRUE;

	switch (inst.pred_insn_type)
	  {
	  case OUTSIDE_PRED_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 12: In an IT block, with no code: error: missing
		       code.  */
		    inst.error = BAD_NOT_IT;
		    return FAIL;
		  }
		else if (inst.cond > COND_ALWAYS)
		  {
		    /* Case 11: In an IT block, with a VPT code: syntax error.
		     */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else if (thumb_mode)
		  {
		    /* This is for some special cases where a non-MVE
		       instruction is not allowed in an IT block, such as cbz,
		       but are put into one with a condition code.
		       You could argue this should be a syntax error, but we
		       gave the 'not allowed in IT block' diagnostic in the
		       past so we will keep doing so.  */
		    inst.error = BAD_NOT_IT;
		    return FAIL;
		  }
		break;
	      }
	    else
	      {
		/* Case 15: In a VPT block, with no code: UNPREDICTABLE.  */
		as_tsktsk (MVE_NOT_VPT);
		return SUCCESS;
	      }
	  case MVE_OUTSIDE_PRED_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 3: In an IT block, with no code: warning:
		       UNPREDICTABLE.  */
		    as_tsktsk (MVE_NOT_IT);
		    return SUCCESS;
		  }
		else if (inst.cond < COND_ALWAYS)
		  {
		    /* Case 1: In an IT block, with an IT code: syntax error.
		     */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else
		  gas_assert (0);
	      }
	    else
	      {
		if (inst.cond < COND_ALWAYS)
		  {
		    /* Case 4: In a VPT block, with an IT code: syntax error.
		     */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 6: In a VPT block, with no code: error: missing
		       code.  */
		    inst.error = BAD_NOT_VPT;
		    return FAIL;
		  }
		else
		  {
		    gas_assert (0);
		  }
	      }
	    /* NOTE(review): every path above returns or asserts, so the
	       apparent fall-through into the next case should be
	       unreachable.  */
	  case MVE_UNPREDICABLE_INSN:
	    as_tsktsk (now_pred.type == SCALAR_PRED ? MVE_NOT_IT : MVE_NOT_VPT);
	    return SUCCESS;
	  case INSIDE_IT_INSN:
	    if (inst.cond > COND_ALWAYS)
	      {
		/* Case 11: In an IT block, with a VPT code: syntax error.  */
		/* Case 14: In a VPT block, with a VPT code: syntax error.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	    else if (now_pred.type == SCALAR_PRED)
	      {
		/* Case 10: In an IT block, with an IT code: OK! */
		if (cond != inst.cond)
		  {
		    inst.error = now_pred.type == SCALAR_PRED ? BAD_IT_COND :
		      BAD_VPT_COND;
		    return FAIL;
		  }
	      }
	    else
	      {
		/* Case 13: In a VPT block, with an IT code: error: should be
		   in an IT block.  */
		inst.error = BAD_OUT_IT;
		return FAIL;
	      }
	    break;

	  case INSIDE_VPT_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		/* Case 2: In an IT block, with a VPT code: error: must be in a
		   VPT block.  */
		inst.error = BAD_OUT_VPT;
		return FAIL;
	      }
	    /* Case 5:  In a VPT block, with a VPT code: OK! */
	    else if (cond != inst.cond)
	      {
		inst.error = BAD_VPT_COND;
		return FAIL;
	      }
	    break;
	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (now_pred.type == VECTOR_PRED || inst.cond > COND_ALWAYS)
	      {
		/* Case 4: In a VPT block, with an IT code: syntax error.  */
		/* Case 11: In an IT block, with a VPT code: syntax error.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	    else if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in a IT or VPT
	       block.  */
	    break;

	  case IT_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		inst.error = BAD_IT_IT;
		return FAIL;
	      }
	    /* fall through.  */
	  case VPT_INSN:
	    if (inst.cond == COND_ALWAYS)
	      {
		/* Executing a VPT/VPST instruction inside an IT block or a
		   VPT/VPST/IT instruction inside a VPT block is UNPREDICTABLE.
		 */
		if (now_pred.type == SCALAR_PRED)
		  as_tsktsk (MVE_NOT_IT);
		else
		  as_tsktsk (MVE_NOT_VPT);
		return SUCCESS;
	      }
	    else
	      {
		/* VPT/VPST do not accept condition codes.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	  }
      }
      break;
    }

  return SUCCESS;
}
20268
/* A class of 16-bit Thumb instructions: an encoding matches the class
   when (encoding & mask) == pattern.  DESCRIPTION names the class for
   diagnostics.  */
struct depr_insn_mask
{
  unsigned long pattern;
  unsigned long mask;
  const char* description;
};

/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Terminated by an all-zero entry (mask == 0).  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode. 'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
20290
20291 static void
20292 it_fsm_post_encode (void)
20293 {
20294 int is_last;
20295
20296 if (!now_pred.state_handled)
20297 handle_pred_state ();
20298
20299 if (now_pred.insn_cond
20300 && !now_pred.warn_deprecated
20301 && warn_on_deprecated
20302 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
20303 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
20304 {
20305 if (inst.instruction >= 0x10000)
20306 {
20307 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
20308 "performance deprecated in ARMv8-A and ARMv8-R"));
20309 now_pred.warn_deprecated = TRUE;
20310 }
20311 else
20312 {
20313 const struct depr_insn_mask *p = depr_it_insns;
20314
20315 while (p->mask != 0)
20316 {
20317 if ((inst.instruction & p->mask) == p->pattern)
20318 {
20319 as_tsktsk (_("IT blocks containing 16-bit Thumb "
20320 "instructions of the following class are "
20321 "performance deprecated in ARMv8-A and "
20322 "ARMv8-R: %s"), p->description);
20323 now_pred.warn_deprecated = TRUE;
20324 break;
20325 }
20326
20327 ++p;
20328 }
20329 }
20330
20331 if (now_pred.block_length > 1)
20332 {
20333 as_tsktsk (_("IT blocks containing more than one conditional "
20334 "instruction are performance deprecated in ARMv8-A and "
20335 "ARMv8-R"));
20336 now_pred.warn_deprecated = TRUE;
20337 }
20338 }
20339
20340 is_last = (now_pred.mask == 0x10);
20341 if (is_last)
20342 {
20343 now_pred.state = OUTSIDE_PRED_BLOCK;
20344 now_pred.mask = 0;
20345 }
20346 }
20347
20348 static void
20349 force_automatic_it_block_close (void)
20350 {
20351 if (now_pred.state == AUTOMATIC_PRED_BLOCK)
20352 {
20353 close_automatic_it_block ();
20354 now_pred.state = OUTSIDE_PRED_BLOCK;
20355 now_pred.mask = 0;
20356 }
20357 }
20358
20359 static int
20360 in_pred_block (void)
20361 {
20362 if (!now_pred.state_handled)
20363 handle_pred_state ();
20364
20365 return now_pred.state != OUTSIDE_PRED_BLOCK;
20366 }
20367
20368 /* Whether OPCODE only has T32 encoding. Since this function is only used by
20369 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
20370 here, hence the "known" in the function name. */
20371
20372 static bfd_boolean
20373 known_t32_only_insn (const struct asm_opcode *opcode)
20374 {
20375 /* Original Thumb-1 wide instruction. */
20376 if (opcode->tencode == do_t_blx
20377 || opcode->tencode == do_t_branch23
20378 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
20379 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
20380 return TRUE;
20381
20382 /* Wide-only instruction added to ARMv8-M Baseline. */
20383 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
20384 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
20385 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
20386 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
20387 return TRUE;
20388
20389 return FALSE;
20390 }
20391
20392 /* Whether wide instruction variant can be used if available for a valid OPCODE
20393 in ARCH. */
20394
20395 static bfd_boolean
20396 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
20397 {
20398 if (known_t32_only_insn (opcode))
20399 return TRUE;
20400
20401 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
20402 of variant T3 of B.W is checked in do_t_branch. */
20403 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
20404 && opcode->tencode == do_t_branch)
20405 return TRUE;
20406
20407 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
20408 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
20409 && opcode->tencode == do_t_mov_cmp
20410 /* Make sure CMP instruction is not affected. */
20411 && opcode->aencode == do_mov)
20412 return TRUE;
20413
20414 /* Wide instruction variants of all instructions with narrow *and* wide
20415 variants become available with ARMv6t2. Other opcodes are either
20416 narrow-only or wide-only and are thus available if OPCODE is valid. */
20417 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
20418 return TRUE;
20419
20420 /* OPCODE with narrow only instruction variant or wide variant not
20421 available. */
20422 return FALSE;
20423 }
20424
/* Assemble one source line STR: look up the mnemonic, parse the operands,
   encode the instruction for the current mode (Thumb or ARM) and emit it.
   Diagnostics are reported via as_bad/inst.error.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Reset the per-instruction state, including all relocations.  */
  memset (&inst, '\0', sizeof (inst));
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    inst.relocs[r].type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the pred_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 16-bit encodings never fall in [0xe800, 0xffff]; anything
	     above 0xffff is a 32-bit encoding.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
20619
/* Warn if assembly finishes while a manually-opened IT or VPT/VPST block
   is still incomplete.  With ELF output every section's saved predication
   state is checked; otherwise only the global state.  */
static void
check_pred_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_pred.state
	== MANUAL_PRED_BLOCK)
      {
	/* NOTE(review): the IT-vs-VPT wording is chosen from the global
	   now_pred.type, not the per-section current_pred.type tested
	   above -- confirm this is intended when several sections are
	   involved.  */
	if (now_pred.type == SCALAR_PRED)
	  as_warn (_("section '%s' finished with an open IT block."),
		   sect->name);
	else
	  as_warn (_("section '%s' finished with an open VPT/VPST block."),
		   sect->name);
      }
#else
  if (now_pred.state == MANUAL_PRED_BLOCK)
    {
      if (now_pred.type == SCALAR_PRED)
	as_warn (_("file finished with an open IT block."));
      else
	as_warn (_("file finished with an open VPT/VPST block."));
    }
#endif
}
20647
20648 /* Various frobbings of labels and their addresses. */
20649
/* Start-of-line hook: forget the label remembered from the previous
   line so that stale labels are never associated with this line's
   instruction.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
20655
/* Hook run for every label definition.  Records the label, tags it as
   ARM or Thumb code according to the current assembly mode, closes any
   automatically opened IT block, optionally marks it as a Thumb
   function (see the long comment below for why local .L labels are
   excluded), and emits it into the DWARF line info.  */
void
arm_frob_label (symbolS * sym)
{
  /* Remember the most recent label for later hooks on this line.  */
  last_label_seen = sym;

  /* Mark the symbol with the current ARM/Thumb state.  */
  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any implicitly generated IT block.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.  Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:  .word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      /* The flag is one-shot: it applies only to the next label.  */
      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
20714
20715 bfd_boolean
20716 arm_data_in_code (void)
20717 {
20718 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
20719 {
20720 *input_line_pointer = '/';
20721 input_line_pointer += 5;
20722 *input_line_pointer = 0;
20723 return TRUE;
20724 }
20725
20726 return FALSE;
20727 }
20728
20729 char *
20730 arm_canonicalize_symbol_name (char * name)
20731 {
20732 int len;
20733
20734 if (thumb_mode && (len = strlen (name)) > 5
20735 && streq (name + len - 5, "/data"))
20736 *(name + len - 5) = 0;
20737
20738 return name;
20739 }
20740 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* REGDEF builds one struct reg_entry initializer: stringified name,
   encoded register number, register type, built-in flag, and a
   trailing 0 (presumably the neon type-alias slot -- confirm against
   struct reg_entry).  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* Q registers map onto pairs of D registers, hence 2 * n.  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* Sixteen consecutive registers prefixed P, numbered 0-15.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* The high half, numbered 16-31 (e.g. Neon D16-D31).  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* LR, SP and SPSR entries for one banked mode, in both cases.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)

static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
  REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
#undef REGDEF
#undef REGNUM
#undef REGSET
/* NOTE(review): REGNUM2, REGSETH, REGSET2 and SPLRBANK are left
   defined past this point -- confirm whether that is intentional.  */
20890
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  The multi-letter entries below enumerate
   every permutation of the f, s, x and c flag letters, so any
   ordering the user writes maps to the same flag mask.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
20969
/* Table of V7M psr names.  Each name appears in lower- and uppercase
   with the same value ("xPSR" is an extra mixed-case spelling).  The
   values are presumably the M-profile MSR/MRS SYSm encodings -- TODO
   confirm against the MSR/MRS encoding functions.  Entries with bit 7
   set (the *_ns names) are the non-secure variants.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	   0x0 }, {"APSR",	   0x0 },
  {"iapsr",	   0x1 }, {"IAPSR",	   0x1 },
  {"eapsr",	   0x2 }, {"EAPSR",	   0x2 },
  {"psr",	   0x3 }, {"PSR",	   0x3 },
  /* NOTE(review): "xPSR" uses the literal 3 rather than 0x3 -- same
     value, inconsistent spelling only.  */
  {"xpsr",	   0x3 }, {"XPSR",	   0x3 }, {"xPSR",	  3 },
  {"ipsr",	   0x5 }, {"IPSR",	   0x5 },
  {"epsr",	   0x6 }, {"EPSR",	   0x6 },
  {"iepsr",	   0x7 }, {"IEPSR",	   0x7 },
  {"msp",	   0x8 }, {"MSP",	   0x8 },
  {"psp",	   0x9 }, {"PSP",	   0x9 },
  {"msplim",	   0xa }, {"MSPLIM",	   0xa },
  {"psplim",	   0xb }, {"PSPLIM",	   0xb },
  {"primask",	   0x10}, {"PRIMASK",	   0x10},
  {"basepri",	   0x11}, {"BASEPRI",	   0x11},
  {"basepri_max",  0x12}, {"BASEPRI_MAX",  0x12},
  {"faultmask",	   0x13}, {"FAULTMASK",	   0x13},
  {"control",	   0x14}, {"CONTROL",	   0x14},
  {"msp_ns",	   0x88}, {"MSP_NS",	   0x88},
  {"psp_ns",	   0x89}, {"PSP_NS",	   0x89},
  {"msplim_ns",	   0x8a}, {"MSPLIM_NS",	   0x8a},
  {"psplim_ns",	   0x8b}, {"PSPLIM_NS",	   0x8b},
  {"primask_ns",   0x90}, {"PRIMASK_NS",   0x90},
  {"basepri_ns",   0x91}, {"BASEPRI_NS",   0x91},
  {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
  {"control_ns",   0x94}, {"CONTROL_NS",   0x94},
  {"sp_ns",	   0x98}, {"SP_NS",	   0x98 }
};
21000
/* Table of all shift-in-operand names.  "asl" is accepted as a
   synonym for "lsl" (both map to SHIFT_LSL).  SHIFT_UXTW is
   presumably for the MVE uxtw-scaled addressing forms -- confirm
   against the operand parser.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX },
  { "uxtw", SHIFT_UXTW}, { "UXTW", SHIFT_UXTW}
};
21012
/* Table of all explicit relocation names.  Every relocation specifier
   is entered twice, in lowercase and uppercase, so that lookup is
   effectively case-insensitive.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got", BFD_RELOC_ARM_GOT32 },   { "GOT", BFD_RELOC_ARM_GOT32 },
  { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
  { "plt", BFD_RELOC_ARM_PLT32 },   { "PLT", BFD_RELOC_ARM_PLT32 },
  { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
  { "tlsgd", BFD_RELOC_ARM_TLS_GD32 }, { "TLSGD", BFD_RELOC_ARM_TLS_GD32 },
  { "tlsldm", BFD_RELOC_ARM_TLS_LDM32 }, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32 },
  { "tlsldo", BFD_RELOC_ARM_TLS_LDO32 }, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32 },
  { "gottpoff", BFD_RELOC_ARM_TLS_IE32 }, { "GOTTPOFF", BFD_RELOC_ARM_TLS_IE32 },
  { "tpoff", BFD_RELOC_ARM_TLS_LE32 }, { "TPOFF", BFD_RELOC_ARM_TLS_LE32 },
  { "got_prel", BFD_RELOC_ARM_GOT_PREL }, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL },
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC },
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC },
  { "tlscall", BFD_RELOC_ARM_TLS_CALL },
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL },
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ },
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ },
  { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC },
	{ "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC },
  { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC },
	{ "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "funcdesc", BFD_RELOC_ARM_FUNCDESC },
	{ "FUNCDESC", BFD_RELOC_ARM_FUNCDESC },
  { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC },
	{ "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC },
  { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
	{ "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
  /* Bug fix: the uppercase spelling previously read "GOTTPOFF_FDIC"
     (missing 'P'), so the documented uppercase form of this FDPIC
     TLS-IE specifier was never recognized.  */
  { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC },
	{ "GOTTPOFF_FDPIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
};
#endif
21046
/* Table of all conditional affixes.  The value is the 4-bit
   condition-code field encoding.  "hs" aliases "cs"; "ul" and "lo"
   both alias "cc".  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
/* Vector (VPT block) "t"/"e" suffixes.  Their values lie outside the
   0x0-0xe range used by the real condition codes in conds[], so they
   cannot be confused with a genuine condition encoding.  */
static const struct asm_cond vconds[] =
{
    {"t", 0xf},
    {"e", 0x10}
};
21071
/* UL_BARRIER enters a barrier option twice, under its lowercase and
   uppercase names, with the same 4-bit encoding and the same
   architecture feature gate.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Table of memory-barrier option names and encodings.  The load-only
   variants (ld/ishld/nshld/oshld) are gated on ARMv8; everything else
   needs only the barrier extension.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
21097
21098 /* Table of ARM-format instructions. */
21099
/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en, 0 }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* ARM-only variants of all the above.  Note that the uppercase macros
   here expect a quoted-string mnemonic while C3/CM/UE/UF and the Neon
   macros stringify a bare token with #mnem.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Thumb-only variants of TCE and TUE.  */
#define ToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

#define ToU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToC.  */
#define toC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToU.  */
#define toU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Helper for CM below: one entry with condition m2 infixed between the
   m1 and m3 mnemonic fragments.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Expand to one entry per condition affix (matching conds[] above),
   plus the bare unconditional spelling.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag, mve_p)			\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag, mve_p)			\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* Like nCEF, but uses an M_MNEM opcode enumerator and marks the entry
   as an MVE predicated instruction.  */
#define mCEF(mnem, op, nops, ops, enc)				\
  { #mnem, OPS##nops ops, OT_csuffixF, M_MNEM##op, M_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }


/* nCEF but for MVE predicated instructions.  */
#define mnCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)

/* nCE but for MVE predicated instructions.  */
#define mnCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NUF but for potentially MVE predicated instructions.  */
#define MNUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* nUF but for potentially MVE predicated instructions.  */
#define mnUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* ToC but for potentially MVE predicated instructions.  */
#define mToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 1 }

/* NCE but for MVE predicated instructions.  */
#define MNCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NCEF but for MVE predicated instructions.  */
#define MNCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)
/* Placeholder "encoding function" for table entries that need none.  */
#define do_0 0
21329
21330 static const struct asm_opcode insns[] =
21331 {
21332 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
21333 #define THUMB_VARIANT & arm_ext_v4t
21334 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
21335 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
21336 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
21337 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
21338 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
21339 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
21340 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
21341 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
21342 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
21343 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
21344 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
21345 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
21346 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
21347 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
21348 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
21349 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
21350
21351 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
21352 for setting PSR flag bits. They are obsolete in V6 and do not
21353 have Thumb equivalents. */
21354 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
21355 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
21356 CL("tstp", 110f000, 2, (RR, SH), cmp),
21357 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
21358 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
21359 CL("cmpp", 150f000, 2, (RR, SH), cmp),
21360 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
21361 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
21362 CL("cmnp", 170f000, 2, (RR, SH), cmp),
21363
21364 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
21365 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
21366 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
21367 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
21368
21369 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
21370 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
21371 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
21372 OP_RRnpc),
21373 OP_ADDRGLDR),ldst, t_ldst),
21374 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
21375
21376 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21377 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21378 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21379 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21380 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21381 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21382
21383 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
21384 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
21385
21386 /* Pseudo ops. */
21387 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
21388 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
21389 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
21390 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
21391
21392 /* Thumb-compatibility pseudo ops. */
21393 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
21394 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
21395 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
21396 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
21397 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
21398 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
21399 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
21400 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
21401 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
21402 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
21403 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
21404 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
21405
21406 /* These may simplify to neg. */
21407 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
21408 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
21409
21410 #undef THUMB_VARIANT
21411 #define THUMB_VARIANT & arm_ext_os
21412
21413 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
21414 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
21415
21416 #undef THUMB_VARIANT
21417 #define THUMB_VARIANT & arm_ext_v6
21418
21419 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
21420
21421 /* V1 instructions with no Thumb analogue prior to V6T2. */
21422 #undef THUMB_VARIANT
21423 #define THUMB_VARIANT & arm_ext_v6t2
21424
21425 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
21426 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
21427 CL("teqp", 130f000, 2, (RR, SH), cmp),
21428
21429 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
21430 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
21431 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
21432 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
21433
21434 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21435 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21436
21437 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21438 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21439
21440 /* V1 instructions with no Thumb analogue at all. */
21441 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
21442 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
21443
21444 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
21445 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
21446 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
21447 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
21448 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
21449 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
21450 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
21451 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
21452
21453 #undef ARM_VARIANT
21454 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
21455 #undef THUMB_VARIANT
21456 #define THUMB_VARIANT & arm_ext_v4t
21457
21458 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
21459 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
21460
21461 #undef THUMB_VARIANT
21462 #define THUMB_VARIANT & arm_ext_v6t2
21463
21464 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
21465 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
21466
21467 /* Generic coprocessor instructions. */
21468 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
21469 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21470 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21471 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21472 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21473 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
21474 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
21475
21476 #undef ARM_VARIANT
21477 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
21478
21479 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
21480 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
21481
21482 #undef ARM_VARIANT
21483 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
21484 #undef THUMB_VARIANT
21485 #define THUMB_VARIANT & arm_ext_msr
21486
21487 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
21488 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
21489
21490 #undef ARM_VARIANT
21491 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
21492 #undef THUMB_VARIANT
21493 #define THUMB_VARIANT & arm_ext_v6t2
21494
21495 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
21496 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
21497 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
21498 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
21499 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
21500 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
21501 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
21502 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
21503
21504 #undef ARM_VARIANT
21505 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
21506 #undef THUMB_VARIANT
21507 #define THUMB_VARIANT & arm_ext_v4t
21508
21509 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21510 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21511 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21512 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21513 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21514 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21515
21516 #undef ARM_VARIANT
21517 #define ARM_VARIANT & arm_ext_v4t_5
21518
21519 /* ARM Architecture 4T. */
21520 /* Note: bx (and blx) are required on V5, even if the processor does
21521 not support Thumb. */
21522 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
21523
21524 #undef ARM_VARIANT
21525 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
21526 #undef THUMB_VARIANT
21527 #define THUMB_VARIANT & arm_ext_v5t
21528
21529 /* Note: blx has 2 variants; the .value coded here is for
21530 BLX(2). Only this variant has conditional execution. */
21531 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
21532 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
21533
21534 #undef THUMB_VARIANT
21535 #define THUMB_VARIANT & arm_ext_v6t2
21536
21537 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
21538 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21539 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21540 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21541 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21542 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
21543 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
21544 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
21545
21546 #undef ARM_VARIANT
21547 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
21548 #undef THUMB_VARIANT
21549 #define THUMB_VARIANT & arm_ext_v5exp
21550
21551 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21552 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21553 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21554 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21555
21556 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21557 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21558
21559 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
21560 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
21561 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
21562 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
21563
21564 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21565 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21566 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21567 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21568
21569 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21570 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21571
21572 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
21573 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
21574 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
21575 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
21576
21577 #undef ARM_VARIANT
21578 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
21579 #undef THUMB_VARIANT
21580 #define THUMB_VARIANT & arm_ext_v6t2
21581
21582 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
21583 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
21584 ldrd, t_ldstd),
21585 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
21586 ADDRGLDRS), ldrd, t_ldstd),
21587
21588 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
21589 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
21590
21591 #undef ARM_VARIANT
21592 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
21593
21594 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
21595
21596 #undef ARM_VARIANT
21597 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
21598 #undef THUMB_VARIANT
21599 #define THUMB_VARIANT & arm_ext_v6
21600
21601 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
21602 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
21603 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
21604 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
21605 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
21606 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21607 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21608 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21609 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21610 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
21611
21612 #undef THUMB_VARIANT
21613 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21614
21615 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
21616 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
21617 strex, t_strex),
21618 #undef THUMB_VARIANT
21619 #define THUMB_VARIANT & arm_ext_v6t2
21620
21621 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
21622 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
21623
21624 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
21625 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
21626
21627 /* ARM V6 not included in V7M. */
21628 #undef THUMB_VARIANT
21629 #define THUMB_VARIANT & arm_ext_v6_notm
21630 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
21631 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
21632 UF(rfeib, 9900a00, 1, (RRw), rfe),
21633 UF(rfeda, 8100a00, 1, (RRw), rfe),
21634 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
21635 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
21636 UF(rfefa, 8100a00, 1, (RRw), rfe),
21637 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
21638 UF(rfeed, 9900a00, 1, (RRw), rfe),
21639 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
21640 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
21641 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
21642 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
21643 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
21644 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
21645 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
21646 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
21647 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
21648 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
21649
21650 /* ARM V6 not included in V7M (eg. integer SIMD). */
21651 #undef THUMB_VARIANT
21652 #define THUMB_VARIANT & arm_ext_v6_dsp
21653 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
21654 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
21655 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21656 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21657 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21658 /* Old name for QASX. */
21659 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21660 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21661 /* Old name for QSAX. */
21662 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21663 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21664 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21665 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21666 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21667 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21668 /* Old name for SASX. */
21669 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21670 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21671 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21672 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21673 /* Old name for SHASX. */
21674 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21675 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21676 /* Old name for SHSAX. */
21677 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21678 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21679 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21680 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21681 /* Old name for SSAX. */
21682 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21683 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21684 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21685 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21686 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21687 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21688 /* Old name for UASX. */
21689 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21690 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21691 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21692 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21693 /* Old name for UHASX. */
21694 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21695 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21696 /* Old name for UHSAX. */
21697 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21698 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21699 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21700 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21701 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21702 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21703 /* Old name for UQASX. */
21704 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21705 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21706 /* Old name for UQSAX. */
21707 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21708 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21709 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21710 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21711 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21712 /* Old name for USAX. */
21713 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21714 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21715 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21716 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21717 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21718 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21719 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21720 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21721 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21722 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21723 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21724 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21725 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21726 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
21727 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
21728 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21729 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21730 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
21731 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
21732 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21733 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21734 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21735 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21736 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21737 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21738 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21739 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21740 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21741 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21742 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
21743 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
21744 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21745 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21746 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
21747
21748 #undef ARM_VARIANT
21749 #define ARM_VARIANT & arm_ext_v6k_v6t2
21750 #undef THUMB_VARIANT
21751 #define THUMB_VARIANT & arm_ext_v6k_v6t2
21752
21753 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
21754 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
21755 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
21756 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
21757
21758 #undef THUMB_VARIANT
21759 #define THUMB_VARIANT & arm_ext_v6_notm
21760 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
21761 ldrexd, t_ldrexd),
21762 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
21763 RRnpcb), strexd, t_strexd),
21764
21765 #undef THUMB_VARIANT
21766 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21767 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
21768 rd_rn, rd_rn),
21769 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
21770 rd_rn, rd_rn),
21771 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
21772 strex, t_strexbh),
21773 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
21774 strex, t_strexbh),
21775 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
21776
21777 #undef ARM_VARIANT
21778 #define ARM_VARIANT & arm_ext_sec
21779 #undef THUMB_VARIANT
21780 #define THUMB_VARIANT & arm_ext_sec
21781
21782 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
21783
21784 #undef ARM_VARIANT
21785 #define ARM_VARIANT & arm_ext_virt
21786 #undef THUMB_VARIANT
21787 #define THUMB_VARIANT & arm_ext_virt
21788
21789 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
21790 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
21791
21792 #undef ARM_VARIANT
21793 #define ARM_VARIANT & arm_ext_pan
21794 #undef THUMB_VARIANT
21795 #define THUMB_VARIANT & arm_ext_pan
21796
21797 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
21798
21799 #undef ARM_VARIANT
21800 #define ARM_VARIANT & arm_ext_v6t2
21801 #undef THUMB_VARIANT
21802 #define THUMB_VARIANT & arm_ext_v6t2
21803
21804 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
21805 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
21806 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
21807 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
21808
21809 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
21810 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
21811
21812 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
21813 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
21814 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
21815 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
21816
21817 #undef ARM_VARIANT
21818 #define ARM_VARIANT & arm_ext_v3
21819 #undef THUMB_VARIANT
21820 #define THUMB_VARIANT & arm_ext_v6t2
21821
21822 TUE("csdb", 320f014, f3af8014, 0, (), noargs, t_csdb),
21823 TUF("ssbb", 57ff040, f3bf8f40, 0, (), noargs, t_csdb),
21824 TUF("pssbb", 57ff044, f3bf8f44, 0, (), noargs, t_csdb),
21825
21826 #undef ARM_VARIANT
21827 #define ARM_VARIANT & arm_ext_v6t2
21828 #undef THUMB_VARIANT
21829 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21830 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
21831 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
21832
21833 /* Thumb-only instructions. */
21834 #undef ARM_VARIANT
21835 #define ARM_VARIANT NULL
21836 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
21837 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
21838
21839 /* ARM does not really have an IT instruction, so always allow it.
21840 The opcode is copied from Thumb in order to allow warnings in
21841 -mimplicit-it=[never | arm] modes. */
21842 #undef ARM_VARIANT
21843 #define ARM_VARIANT & arm_ext_v1
21844 #undef THUMB_VARIANT
21845 #define THUMB_VARIANT & arm_ext_v6t2
21846
21847 TUE("it", bf08, bf08, 1, (COND), it, t_it),
21848 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
21849 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
21850 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
21851 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
21852 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
21853 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
21854 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
21855 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
21856 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
21857 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
21858 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
21859 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
21860 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
21861 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
21862 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
21863 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
21864 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
21865
21866 /* Thumb2 only instructions. */
21867 #undef ARM_VARIANT
21868 #define ARM_VARIANT NULL
21869
21870 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
21871 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
21872 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
21873 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
21874 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
21875 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
21876
21877 /* Hardware division instructions. */
21878 #undef ARM_VARIANT
21879 #define ARM_VARIANT & arm_ext_adiv
21880 #undef THUMB_VARIANT
21881 #define THUMB_VARIANT & arm_ext_div
21882
21883 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
21884 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
21885
21886 /* ARM V6M/V7 instructions. */
21887 #undef ARM_VARIANT
21888 #define ARM_VARIANT & arm_ext_barrier
21889 #undef THUMB_VARIANT
21890 #define THUMB_VARIANT & arm_ext_barrier
21891
21892 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
21893 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
21894 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
21895
21896 /* ARM V7 instructions. */
21897 #undef ARM_VARIANT
21898 #define ARM_VARIANT & arm_ext_v7
21899 #undef THUMB_VARIANT
21900 #define THUMB_VARIANT & arm_ext_v7
21901
21902 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
21903 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
21904
21905 #undef ARM_VARIANT
21906 #define ARM_VARIANT & arm_ext_mp
21907 #undef THUMB_VARIANT
21908 #define THUMB_VARIANT & arm_ext_mp
21909
21910 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
21911
21912 /* AArchv8 instructions. */
21913 #undef ARM_VARIANT
21914 #define ARM_VARIANT & arm_ext_v8
21915
21916 /* Instructions shared between armv8-a and armv8-m. */
21917 #undef THUMB_VARIANT
21918 #define THUMB_VARIANT & arm_ext_atomics
21919
21920 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21921 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21922 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21923 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
21924 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
21925 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
21926 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21927 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
21928 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21929 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
21930 stlex, t_stlex),
21931 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
21932 stlex, t_stlex),
21933 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
21934 stlex, t_stlex),
21935 #undef THUMB_VARIANT
21936 #define THUMB_VARIANT & arm_ext_v8
21937
21938 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
21939 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
21940 ldrexd, t_ldrexd),
21941 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
21942 strexd, t_strexd),
21943
21944 /* Defined in V8 but is in undefined encoding space for earlier
21945 architectures. However earlier architectures are required to treat
 21946    this instruction as a semihosting trap as well.  Hence while not explicitly
21947 defined as such, it is in fact correct to define the instruction for all
21948 architectures. */
21949 #undef THUMB_VARIANT
21950 #define THUMB_VARIANT & arm_ext_v1
21951 #undef ARM_VARIANT
21952 #define ARM_VARIANT & arm_ext_v1
21953 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
21954
21955 /* ARMv8 T32 only. */
21956 #undef ARM_VARIANT
21957 #define ARM_VARIANT NULL
21958 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
21959 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
21960 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
21961
21962 /* FP for ARMv8. */
21963 #undef ARM_VARIANT
21964 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
21965 #undef THUMB_VARIANT
21966 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
21967
21968 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
21969 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
21970 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
21971 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
21972 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
21973 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
21974 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
21975 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
21976 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
21977 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
21978 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
21979 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
21980 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
21981
21982 /* Crypto v1 extensions. */
21983 #undef ARM_VARIANT
21984 #define ARM_VARIANT & fpu_crypto_ext_armv8
21985 #undef THUMB_VARIANT
21986 #define THUMB_VARIANT & fpu_crypto_ext_armv8
21987
21988 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
21989 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
21990 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
21991 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
21992 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
21993 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
21994 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
21995 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
21996 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
21997 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
21998 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
21999 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
22000 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
22001 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
22002
22003 #undef ARM_VARIANT
22004 #define ARM_VARIANT & crc_ext_armv8
22005 #undef THUMB_VARIANT
22006 #define THUMB_VARIANT & crc_ext_armv8
22007 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
22008 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
22009 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
22010 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
22011 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
22012 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
22013
22014 /* ARMv8.2 RAS extension. */
22015 #undef ARM_VARIANT
22016 #define ARM_VARIANT & arm_ext_ras
22017 #undef THUMB_VARIANT
22018 #define THUMB_VARIANT & arm_ext_ras
22019 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
22020
22021 #undef ARM_VARIANT
22022 #define ARM_VARIANT & arm_ext_v8_3
22023 #undef THUMB_VARIANT
22024 #define THUMB_VARIANT & arm_ext_v8_3
22025 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
22026 NUF (vcmla, 0, 4, (RNDQ, RNDQ, RNDQ_RNSC, EXPi), vcmla),
22027 NUF (vcadd, 0, 4, (RNDQ, RNDQ, RNDQ, EXPi), vcadd),
22028
22029 #undef ARM_VARIANT
22030 #define ARM_VARIANT & fpu_neon_ext_dotprod
22031 #undef THUMB_VARIANT
22032 #define THUMB_VARIANT & fpu_neon_ext_dotprod
22033 NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
22034 NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
22035
22036 #undef ARM_VARIANT
22037 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
22038 #undef THUMB_VARIANT
22039 #define THUMB_VARIANT NULL
22040
22041 cCE("wfs", e200110, 1, (RR), rd),
22042 cCE("rfs", e300110, 1, (RR), rd),
22043 cCE("wfc", e400110, 1, (RR), rd),
22044 cCE("rfc", e500110, 1, (RR), rd),
22045
22046 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
22047 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
22048 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
22049 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
22050
22051 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
22052 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
22053 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
22054 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
22055
22056 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
22057 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
22058 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
22059 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
22060 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
22061 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
22062 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
22063 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
22064 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
22065 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
22066 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
22067 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
22068
22069 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
22070 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
22071 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
22072 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
22073 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
22074 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
22075 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
22076 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
22077 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
22078 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
22079 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
22080 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
22081
22082 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
22083 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
22084 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
22085 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
22086 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
22087 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
22088 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
22089 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
22090 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
22091 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
22092 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
22093 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
22094
22095 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
22096 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
22097 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
22098 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
22099 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
22100 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
22101 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
22102 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
22103 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
22104 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
22105 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
22106 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
22107
22108 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
22109 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
22110 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
22111 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
22112 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
22113 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
22114 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
22115 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
22116 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
22117 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
22118 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
22119 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
22120
22121 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
22122 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
22123 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
22124 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
22125 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
22126 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
22127 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
22128 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
22129 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
22130 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
22131 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
22132 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
22133
22134 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
22135 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
22136 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
22137 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
22138 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
22139 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
22140 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
22141 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
22142 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
22143 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
22144 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
22145 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
22146
22147 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
22148 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
22149 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
22150 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
22151 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
22152 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
22153 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
22154 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
22155 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
22156 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
22157 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
22158 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
22159
22160 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
22161 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
22162 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
22163 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
22164 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
22165 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
22166 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
22167 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
22168 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
22169 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
22170 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
22171 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
22172
22173 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
22174 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
22175 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
22176 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
22177 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
22178 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
22179 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
22180 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
22181 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
22182 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
22183 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
22184 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
22185
22186 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
22187 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
22188 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
22189 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
22190 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
22191 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
22192 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
22193 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
22194 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
22195 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
22196 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
22197 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
22198
22199 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
22200 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
22201 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
22202 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
22203 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
22204 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
22205 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
22206 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
22207 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
22208 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
22209 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
22210 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
22211
22212 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
22213 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
22214 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
22215 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
22216 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
22217 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
22218 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
22219 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
22220 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
22221 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
22222 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
22223 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
22224
22225 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
22226 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
22227 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
22228 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
22229 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
22230 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
22231 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
22232 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
22233 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
22234 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
22235 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
22236 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
22237
22238 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
22239 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
22240 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
22241 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
22242 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
22243 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
22244 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
22245 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
22246 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
22247 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
22248 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
22249 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
22250
22251 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
22252 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
22253 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
22254 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
22255 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
22256 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
22257 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
22258 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
22259 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
22260 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
22261 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
22262 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
22263
22264 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
22265 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
22266 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
22267 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
22268 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
22269 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22270 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22271 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22272 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
22273 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
22274 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
22275 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
22276
22277 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
22278 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
22279 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
22280 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
22281 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
22282 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22283 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22284 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22285 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
22286 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
22287 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
22288 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
22289
22290 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
22291 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
22292 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
22293 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
22294 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
22295 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22296 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22297 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22298 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
22299 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
22300 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
22301 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
22302
22303 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
22304 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
22305 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
22306 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
22307 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
22308 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22309 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22310 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22311 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
22312 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
22313 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
22314 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
22315
22316 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
22317 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
22318 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
22319 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
22320 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
22321 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22322 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22323 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22324 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
22325 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
22326 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
22327 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
22328
22329 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
22330 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
22331 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
22332 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
22333 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
22334 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22335 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22336 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22337 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
22338 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
22339 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
22340 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
22341
22342 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
22343 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
22344 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
22345 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
22346 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
22347 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22348 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22349 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22350 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
22351 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
22352 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
22353 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
22354
22355 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
22356 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
22357 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
22358 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
22359 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
22360 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22361 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22362 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22363 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
22364 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
22365 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
22366 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
22367
22368 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
22369 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
22370 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
22371 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
22372 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
22373 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22374 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22375 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22376 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
22377 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
22378 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
22379 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
22380
22381 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
22382 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
22383 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
22384 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
22385 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
22386 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22387 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22388 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22389 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
22390 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
22391 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
22392 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
22393
22394 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
22395 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
22396 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
22397 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
22398 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
22399 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22400 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22401 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22402 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
22403 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
22404 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
22405 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
22406
22407 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
22408 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
22409 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
22410 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
22411 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
22412 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22413 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22414 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22415 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
22416 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
22417 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
22418 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
22419
22420 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
22421 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
22422 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
22423 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
22424 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
22425 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22426 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22427 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22428 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
22429 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
22430 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
22431 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
22432
22433 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
22434 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
22435 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
22436 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
22437
22438 cCL("flts", e000110, 2, (RF, RR), rn_rd),
22439 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
22440 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
22441 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
22442 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
22443 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
22444 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
22445 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
22446 cCL("flte", e080110, 2, (RF, RR), rn_rd),
22447 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
22448 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
22449 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
22450
22451 /* The implementation of the FIX instruction is broken on some
22452 assemblers, in that it accepts a precision specifier as well as a
22453 rounding specifier, despite the fact that this is meaningless.
22454 To be more compatible, we accept it as well, though of course it
22455 does not set any bits. */
22456 cCE("fix", e100110, 2, (RR, RF), rd_rm),
22457 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
22458 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
22459 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
22460 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
22461 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
22462 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
22463 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
22464 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
22465 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
22466 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
22467 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
22468 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
22469
22470 /* Instructions that were new with the real FPA, call them V2. */
22471 #undef ARM_VARIANT
22472 #define ARM_VARIANT & fpu_fpa_ext_v2
22473
22474 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
22475 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
22476 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
22477 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
22478 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
22479 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
22480
22481 #undef ARM_VARIANT
22482 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
22483
22484 /* Moves and type conversions. */
22485 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
22486 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
22487 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
22488 cCE("fmstat", ef1fa10, 0, (), noargs),
22489 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
22490 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
22491 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
22492 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
22493 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
22494 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
22495 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
22496 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
22497 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
22498 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
22499
22500 /* Memory operations. */
22501 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
22502 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
22503 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
22504 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
22505 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
22506 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
22507 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
22508 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
22509 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
22510 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
22511 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
22512 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
22513 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
22514 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
22515 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
22516 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
22517 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
22518 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
22519
22520 /* Monadic operations. */
22521 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
22522 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
22523 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
22524
22525 /* Dyadic operations. */
22526 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22527 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22528 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22529 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22530 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22531 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22532 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22533 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22534 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22535
22536 /* Comparisons. */
22537 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
22538 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
22539 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
22540 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
22541
22542 /* Double precision load/store are still present on single precision
22543 implementations. */
22544 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
22545 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
22546 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
22547 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
22548 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
22549 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
22550 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
22551 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
22552 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
22553 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
22554
22555 #undef ARM_VARIANT
22556 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
22557
22558 /* Moves and type conversions. */
22559 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
22560 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
22561 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
22562 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
22563 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
22564 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
22565 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
22566 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
22567 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
22568 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
22569 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
22570 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
22571 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
22572
22573 /* Monadic operations. */
22574 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
22575 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
22576 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
22577
22578 /* Dyadic operations. */
22579 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22580 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22581 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22582 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22583 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22584 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22585 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22586 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22587 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22588
22589 /* Comparisons. */
22590 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
22591 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
22592 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
22593 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
22594
22595 #undef ARM_VARIANT
22596 #define ARM_VARIANT & fpu_vfp_ext_v2
22597
22598 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
22599 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
22600 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
22601 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
22602
22603 /* Instructions which may belong to either the Neon or VFP instruction sets.
22604 Individual encoder functions perform additional architecture checks. */
22605 #undef ARM_VARIANT
22606 #define ARM_VARIANT & fpu_vfp_ext_v1xd
22607 #undef THUMB_VARIANT
22608 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
22609
22610 /* These mnemonics are unique to VFP. */
22611 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
22612 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
22613 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22614 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22615 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22616 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
22617 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
22618 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
22619 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
22620 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
22621
22622 /* Mnemonics shared by Neon and VFP. */
22623 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
22624 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
22625 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
22626
22627 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22628 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22629 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22630 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22631 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22632 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22633
22634 mnCEF(vcvt, _vcvt, 3, (RNSDQMQ, RNSDQMQ, oI32z), neon_cvt),
22635 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
22636 MNCEF(vcvtb, eb20a40, 3, (RVSDMQ, RVSDMQ, oI32b), neon_cvtb),
22637 MNCEF(vcvtt, eb20a40, 3, (RVSDMQ, RVSDMQ, oI32b), neon_cvtt),
22638
22639
22640 /* NOTE: All VMOV encoding is special-cased! */
22641 NCE(vmov, 0, 1, (VMOV), neon_mov),
22642 NCE(vmovq, 0, 1, (VMOV), neon_mov),
22643
22644 #undef THUMB_VARIANT
22645 /* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
22646 by different feature bits. Since we are setting the Thumb guard, we can
22647 require Thumb-1 which makes it a nop guard and set the right feature bit in
22648 do_vldr_vstr (). */
22649 #define THUMB_VARIANT & arm_ext_v4t
22650 NCE(vldr, d100b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
22651 NCE(vstr, d000b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
22652
22653 #undef ARM_VARIANT
22654 #define ARM_VARIANT & arm_ext_fp16
22655 #undef THUMB_VARIANT
22656 #define THUMB_VARIANT & arm_ext_fp16
22657 /* New instructions added from v8.2, allowing the extraction and insertion of
22658 the upper 16 bits of a 32-bit vector register. */
22659 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
22660 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
22661
22662 /* New backported fma/fms instructions optional in v8.2. */
22663 NCE (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
22664 NCE (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
22665
22666 #undef THUMB_VARIANT
22667 #define THUMB_VARIANT & fpu_neon_ext_v1
22668 #undef ARM_VARIANT
22669 #define ARM_VARIANT & fpu_neon_ext_v1
22670
22671 /* Data processing with three registers of the same length. */
22672 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
22673 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
22674 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
22675 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
22676 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
22677 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
22678 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
22679 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
22680 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
22681 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
22682 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
22683 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
22684 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
22685 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
22686 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
22687 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
22688 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
22689 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
22690 /* If not immediate, fall back to neon_dyadic_i64_su.
22691 shl_imm should accept I8 I16 I32 I64,
22692 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
22693 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
22694 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
22695 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
22696 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
22697 /* Logic ops, types optional & ignored. */
22698 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
22699 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
22700 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
22701 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
22702 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
22703 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
22704 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
22705 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
22706 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
22707 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
22708 /* Bitfield ops, untyped. */
22709 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
22710 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
22711 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
22712 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
22713 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
22714 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
22715 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
22716 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
22717 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
22718 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
22719 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
22720 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
22721 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
22722 back to neon_dyadic_if_su. */
22723 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
22724 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
22725 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
22726 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
22727 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
22728 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
22729 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
22730 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
22731 /* Comparison. Type I8 I16 I32 F32. */
22732 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
22733 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
22734 /* As above, D registers only. */
22735 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
22736 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
22737 /* Int and float variants, signedness unimportant. */
22738 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
22739 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
22740 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
22741 /* Add/sub take types I8 I16 I32 I64 F32. */
22742 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
22743 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
22744 /* vtst takes sizes 8, 16, 32. */
22745 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
22746 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
22747 /* VMUL takes I8 I16 I32 F32 P8. */
22748 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
22749 /* VQD{R}MULH takes S16 S32. */
22750 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
22751 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
22752 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
22753 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
22754 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
22755 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
22756 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
22757 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
22758 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
22759 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
22760 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
22761 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
22762 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
22763 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
22764 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
22765 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
22766 /* ARM v8.1 extension. */
22767 nUF (vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
22768 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
22769 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
22770 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
22771
22772 /* Two address, int/float. Types S8 S16 S32 F32. */
22773 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
22774 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
22775
22776 /* Data processing with two registers and a shift amount. */
22777 /* Right shifts, and variants with rounding.
22778 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
22779 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
22780 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
22781 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
22782 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
22783 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
22784 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
22785 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
22786 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
22787 /* Shift and insert. Sizes accepted 8 16 32 64. */
22788 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
22789 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
22790 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
22791 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
22792 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
22793 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
22794 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
22795 /* Right shift immediate, saturating & narrowing, with rounding variants.
22796 Types accepted S16 S32 S64 U16 U32 U64. */
22797 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
22798 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
22799 /* As above, unsigned. Types accepted S16 S32 S64. */
22800 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
22801 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
22802 /* Right shift narrowing. Types accepted I16 I32 I64. */
22803 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
22804 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
22805 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
22806 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
22807 /* CVT with optional immediate for fixed-point variant. */
22808 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
22809
22810 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
22811 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
22812
22813 /* Data processing, three registers of different lengths. */
22814 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
22815 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
22816 /* If not scalar, fall back to neon_dyadic_long.
22817 Vector types as above, scalar types S16 S32 U16 U32. */
22818 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
22819 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
22820 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
22821 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
22822 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
22823 /* Dyadic, narrowing insns. Types I16 I32 I64. */
22824 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
22825 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
22826 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
22827 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
22828 /* Saturating doubling multiplies. Types S16 S32. */
22829 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
22830 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
22831 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
22832 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
22833 S16 S32 U16 U32. */
22834 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
22835
22836 /* Extract. Size 8. */
22837 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
22838 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
22839
22840 /* Two registers, miscellaneous. */
22841 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
22842 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
22843 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
22844 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
22845 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
22846 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
22847 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
22848 /* Vector replicate. Sizes 8 16 32. */
22849 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
22850 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
22851 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
22852 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
22853 /* VMOVN. Types I16 I32 I64. */
22854 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
22855 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
22856 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
22857 /* VQMOVUN. Types S16 S32 S64. */
22858 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
22859 /* VZIP / VUZP. Sizes 8 16 32. */
22860 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
22861 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
22862 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
22863 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
22864 /* VQABS / VQNEG. Types S8 S16 S32. */
22865 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
22866 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
22867 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
22868 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
22869 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
22870 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
22871 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
22872 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
22873 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
22874 /* Reciprocal estimates. Types U32 F16 F32. */
22875 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
22876 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
22877 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
22878 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
22879 /* VCLS. Types S8 S16 S32. */
22880 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
22881 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
22882 /* VCLZ. Types I8 I16 I32. */
22883 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
22884 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
22885 /* VCNT. Size 8. */
22886 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
22887 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
22888 /* Two address, untyped. */
22889 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
22890 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
22891 /* VTRN. Sizes 8 16 32. */
22892 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
22893 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
22894
22895 /* Table lookup. Size 8. */
22896 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
22897 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
22898
22899 #undef THUMB_VARIANT
22900 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
22901 #undef ARM_VARIANT
22902 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
22903
22904 /* Neon element/structure load/store. */
22905 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
22906 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
22907 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
22908 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
22909 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
22910 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
22911 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
22912 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
22913
22914 #undef THUMB_VARIANT
22915 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
22916 #undef ARM_VARIANT
22917 #define ARM_VARIANT & fpu_vfp_ext_v3xd
22918 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
22919 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
22920 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
22921 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
22922 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
22923 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
22924 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
22925 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
22926 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
22927
22928 #undef THUMB_VARIANT
22929 #define THUMB_VARIANT & fpu_vfp_ext_v3
22930 #undef ARM_VARIANT
22931 #define ARM_VARIANT & fpu_vfp_ext_v3
22932
22933 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
22934 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
22935 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
22936 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
22937 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
22938 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
22939 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
22940 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
22941 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
22942
22943 #undef ARM_VARIANT
22944 #define ARM_VARIANT & fpu_vfp_ext_fma
22945 #undef THUMB_VARIANT
22946 #define THUMB_VARIANT & fpu_vfp_ext_fma
22947 /* Mnemonics shared by Neon and VFP. These are included in the
22948 VFP FMA variant; NEON and VFP FMA always includes the NEON
22949 FMA instructions. */
22950 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
22951 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
22952 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
22953 the v form should always be used. */
22954 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22955 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22956 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22957 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22958 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22959 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22960
22961 #undef THUMB_VARIANT
22962 #undef ARM_VARIANT
22963 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
22964
22965 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22966 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22967 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22968 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22969 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22970 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22971 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
22972 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
22973
22974 #undef ARM_VARIANT
22975 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
22976
22977 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
22978 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
22979 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
22980 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
22981 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
22982 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
22983 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
22984 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
22985 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
22986 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
22987 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
22988 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
22989 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
22990 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
22991 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
22992 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
22993 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
22994 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
22995 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
22996 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
22997 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22998 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22999 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
23000 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
23001 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
23002 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
23003 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
23004 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
23005 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
23006 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
23007 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
23008 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
23009 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
23010 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
23011 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
23012 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
23013 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
23014 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23015 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23016 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23017 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23018 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23019 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23020 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23021 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23022 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23023 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
23024 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23025 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23026 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23027 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23028 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23029 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23030 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23031 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23032 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23033 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23034 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23035 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23036 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23037 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23038 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23039 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23040 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23041 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23042 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23043 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
23044 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
23045 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
23046 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
23047 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23048 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23049 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23050 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23051 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23052 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23053 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23054 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23055 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23056 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23057 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23058 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23059 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23060 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23061 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23062 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23063 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23064 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23065 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
23066 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23067 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23068 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23069 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23070 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23071 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23072 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23073 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23074 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23075 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23076 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23077 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
23078 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
23079 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
23080 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
23081 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
23082 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
23083 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23084 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23085 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23086 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23087 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
23088 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
23089 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
23090 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
23091 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
23092 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
23093 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
23094 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
23095 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
23096 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
23097 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
23098 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
23099 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
23100 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
23101 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
23102 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
23103 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
23104 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
23105 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
23106 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
23107 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
23108 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
23109 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
23110 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23111 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23112 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23113 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23114 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23115 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23116 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23117 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23118 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23119 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
23120 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
23121 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
23122 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
23123 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
23124 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
23125 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23126 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23127 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23128 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
23129 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
23130 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
23131 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
23132 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
23133 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
23134 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23135 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23136 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23137 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23138 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
23139
23140 #undef ARM_VARIANT
23141 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
23142
23143 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
23144 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
23145 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
23146 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
23147 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
23148 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
23149 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23150 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23151 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23152 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23153 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23154 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23155 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23156 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23157 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23158 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23159 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23160 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23161 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23162 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23163 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
23164 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23165 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23166 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23167 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23168 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23169 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23170 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23171 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23172 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23173 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23174 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23175 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23176 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23177 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23178 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23179 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23180 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23181 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23182 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23183 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23184 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23185 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23186 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23187 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23188 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23189 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23190 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23191 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23192 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23193 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23194 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23195 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23196 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23197 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23198 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23199 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23200
23201 #undef ARM_VARIANT
23202 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
23203
23204 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
23205 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
23206 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
23207 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
23208 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
23209 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
23210 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
23211 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
23212 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
23213 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
23214 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
23215 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
23216 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
23217 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
23218 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
23219 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
23220 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
23221 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
23222 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
23223 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
23224 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
23225 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
23226 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
23227 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
23228 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
23229 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
23230 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
23231 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
23232 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
23233 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
23234 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
23235 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
23236 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
23237 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
23238 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
23239 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
23240 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
23241 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
23242 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
23243 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
23244 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
23245 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
23246 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
23247 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
23248 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
23249 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
23250 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
23251 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
23252 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
23253 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
23254 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
23255 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
23256 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
23257 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
23258 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
23259 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
23260 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
23261 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
23262 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
23263 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
23264 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
23265 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
23266 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
23267 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
23268 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
23269 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
23270 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
23271 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
23272 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
23273 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
23274 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
23275 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
23276 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
23277 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
23278 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
23279 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
23280
23281 /* ARMv8.5-A instructions. */
23282 #undef ARM_VARIANT
23283 #define ARM_VARIANT & arm_ext_sb
23284 #undef THUMB_VARIANT
23285 #define THUMB_VARIANT & arm_ext_sb
23286 TUF("sb", 57ff070, f3bf8f70, 0, (), noargs, noargs),
23287
23288 #undef ARM_VARIANT
23289 #define ARM_VARIANT & arm_ext_predres
23290 #undef THUMB_VARIANT
23291 #define THUMB_VARIANT & arm_ext_predres
23292 CE("cfprctx", e070f93, 1, (RRnpc), rd),
23293 CE("dvprctx", e070fb3, 1, (RRnpc), rd),
23294 CE("cpprctx", e070ff3, 1, (RRnpc), rd),
23295
23296 /* ARMv8-M instructions. */
23297 #undef ARM_VARIANT
23298 #define ARM_VARIANT NULL
23299 #undef THUMB_VARIANT
23300 #define THUMB_VARIANT & arm_ext_v8m
23301 ToU("sg", e97fe97f, 0, (), noargs),
23302 ToC("blxns", 4784, 1, (RRnpc), t_blx),
23303 ToC("bxns", 4704, 1, (RRnpc), t_bx),
23304 ToC("tt", e840f000, 2, (RRnpc, RRnpc), tt),
23305 ToC("ttt", e840f040, 2, (RRnpc, RRnpc), tt),
23306 ToC("tta", e840f080, 2, (RRnpc, RRnpc), tt),
23307 ToC("ttat", e840f0c0, 2, (RRnpc, RRnpc), tt),
23308
23309 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
23310 instructions behave as nop if no VFP is present. */
23311 #undef THUMB_VARIANT
23312 #define THUMB_VARIANT & arm_ext_v8m_main
23313 ToC("vlldm", ec300a00, 1, (RRnpc), rn),
23314 ToC("vlstm", ec200a00, 1, (RRnpc), rn),
23315
23316 /* Armv8.1-M Mainline instructions. */
23317 #undef THUMB_VARIANT
23318 #define THUMB_VARIANT & arm_ext_v8_1m_main
23319 toC("bf", _bf, 2, (EXPs, EXPs), t_branch_future),
23320 toU("bfcsel", _bfcsel, 4, (EXPs, EXPs, EXPs, COND), t_branch_future),
23321 toC("bfx", _bfx, 2, (EXPs, RRnpcsp), t_branch_future),
23322 toC("bfl", _bfl, 2, (EXPs, EXPs), t_branch_future),
23323 toC("bflx", _bflx, 2, (EXPs, RRnpcsp), t_branch_future),
23324
23325 toU("dls", _dls, 2, (LR, RRnpcsp), t_loloop),
23326 toU("wls", _wls, 3, (LR, RRnpcsp, EXP), t_loloop),
23327 toU("le", _le, 2, (oLR, EXP), t_loloop),
23328
23329 ToC("clrm", e89f0000, 1, (CLRMLST), t_clrm),
23330 ToC("vscclrm", ec9f0a00, 1, (VRSDVLST), t_vscclrm),
23331
23332 #undef THUMB_VARIANT
23333 #define THUMB_VARIANT & mve_ext
23334 ToC("vpst", fe710f4d, 0, (), mve_vpt),
23335 ToC("vpstt", fe318f4d, 0, (), mve_vpt),
23336 ToC("vpste", fe718f4d, 0, (), mve_vpt),
23337 ToC("vpsttt", fe314f4d, 0, (), mve_vpt),
23338 ToC("vpstte", fe31cf4d, 0, (), mve_vpt),
23339 ToC("vpstet", fe71cf4d, 0, (), mve_vpt),
23340 ToC("vpstee", fe714f4d, 0, (), mve_vpt),
23341 ToC("vpstttt", fe312f4d, 0, (), mve_vpt),
23342 ToC("vpsttte", fe316f4d, 0, (), mve_vpt),
23343 ToC("vpsttet", fe31ef4d, 0, (), mve_vpt),
23344 ToC("vpsttee", fe31af4d, 0, (), mve_vpt),
23345 ToC("vpstett", fe71af4d, 0, (), mve_vpt),
23346 ToC("vpstete", fe71ef4d, 0, (), mve_vpt),
23347 ToC("vpsteet", fe716f4d, 0, (), mve_vpt),
23348 ToC("vpsteee", fe712f4d, 0, (), mve_vpt),
23349
23350 /* MVE and MVE FP only. */
23351 mCEF(vmullb, _vmullb, 3, (RMQ, RMQ, RMQ), mve_vmull),
23352 mCEF(vabav, _vabav, 3, (RRnpcsp, RMQ, RMQ), mve_vabav),
23353 mCEF(vmladav, _vmladav, 3, (RRe, RMQ, RMQ), mve_vmladav),
23354 mCEF(vmladava, _vmladava, 3, (RRe, RMQ, RMQ), mve_vmladav),
23355 mCEF(vmladavx, _vmladavx, 3, (RRe, RMQ, RMQ), mve_vmladav),
23356 mCEF(vmladavax, _vmladavax, 3, (RRe, RMQ, RMQ), mve_vmladav),
23357 mCEF(vmlav, _vmladav, 3, (RRe, RMQ, RMQ), mve_vmladav),
23358 mCEF(vmlava, _vmladava, 3, (RRe, RMQ, RMQ), mve_vmladav),
23359 mCEF(vmlsdav, _vmlsdav, 3, (RRe, RMQ, RMQ), mve_vmladav),
23360 mCEF(vmlsdava, _vmlsdava, 3, (RRe, RMQ, RMQ), mve_vmladav),
23361 mCEF(vmlsdavx, _vmlsdavx, 3, (RRe, RMQ, RMQ), mve_vmladav),
23362 mCEF(vmlsdavax, _vmlsdavax, 3, (RRe, RMQ, RMQ), mve_vmladav),
23363
23364 mCEF(vst20, _vst20, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
23365 mCEF(vst21, _vst21, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
23366 mCEF(vst40, _vst40, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
23367 mCEF(vst41, _vst41, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
23368 mCEF(vst42, _vst42, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
23369 mCEF(vst43, _vst43, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
23370 mCEF(vld20, _vld20, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
23371 mCEF(vld21, _vld21, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
23372 mCEF(vld40, _vld40, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
23373 mCEF(vld41, _vld41, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
23374 mCEF(vld42, _vld42, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
23375 mCEF(vld43, _vld43, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
23376 mCEF(vstrb, _vstrb, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
23377 mCEF(vstrh, _vstrh, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
23378 mCEF(vstrw, _vstrw, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
23379 mCEF(vstrd, _vstrd, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
23380 mCEF(vldrb, _vldrb, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
23381 mCEF(vldrh, _vldrh, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
23382 mCEF(vldrw, _vldrw, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
23383 mCEF(vldrd, _vldrd, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
23384
23385 #undef ARM_VARIANT
23386 #define ARM_VARIANT & fpu_vfp_ext_v1xd
23387 #undef THUMB_VARIANT
23388 #define THUMB_VARIANT & arm_ext_v6t2
23389
23390 mCEF(vmullt, _vmullt, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ), mve_vmull),
23391 mnCEF(vadd, _vadd, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_addsub_if_i),
23392 mnCEF(vsub, _vsub, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_addsub_if_i),
23393
23394 MNCEF(vabs, 1b10300, 2, (RNSDQMQ, RNSDQMQ), neon_abs_neg),
23395 MNCEF(vneg, 1b10380, 2, (RNSDQMQ, RNSDQMQ), neon_abs_neg),
23396
23397 #undef ARM_VARIANT
23398 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
23399 mnUF(vcvta, _vcvta, 2, (RNSDQMQ, oRNSDQMQ), neon_cvta),
23400 mnUF(vcvtp, _vcvta, 2, (RNSDQMQ, oRNSDQMQ), neon_cvtp),
23401 mnUF(vcvtn, _vcvta, 3, (RNSDQMQ, oRNSDQMQ, oI32z), neon_cvtn),
23402 mnUF(vcvtm, _vcvta, 2, (RNSDQMQ, oRNSDQMQ), neon_cvtm),
23403
23404 #undef ARM_VARIANT
23405 #define ARM_VARIANT & fpu_neon_ext_v1
23406 mnUF(vabd, _vabd, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
23407 mnUF(vabdl, _vabdl, 3, (RNQMQ, RNDMQ, RNDMQ), neon_dyadic_long),
23408 mnUF(vaddl, _vaddl, 3, (RNQMQ, RNDMQ, RNDMQR), neon_dyadic_long),
23409 mnUF(vsubl, _vsubl, 3, (RNQMQ, RNDMQ, RNDMQR), neon_dyadic_long),
23410 };
23411 #undef ARM_VARIANT
23412 #undef THUMB_VARIANT
23413 #undef TCE
23414 #undef TUE
23415 #undef TUF
23416 #undef TCC
23417 #undef cCE
23418 #undef cCL
23419 #undef C3E
23420 #undef C3
23421 #undef CE
23422 #undef CM
23423 #undef CL
23424 #undef UE
23425 #undef UF
23426 #undef UT
23427 #undef NUF
23428 #undef nUF
23429 #undef NCE
23430 #undef nCE
23431 #undef OPS0
23432 #undef OPS1
23433 #undef OPS2
23434 #undef OPS3
23435 #undef OPS4
23436 #undef OPS5
23437 #undef OPS6
23438 #undef do_0
23439 #undef ToC
23440 #undef toC
23441 #undef ToU
23442 #undef toU
23443 \f
23444 /* MD interface: bits in the object file. */
23445
23446 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
23447 for use in the a.out file, and stores them in the array pointed to by buf.
23448 This knows about the endian-ness of the target machine and does
23449 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
23450 2 (short) and 4 (long) Floating numbers are put out as a series of
23451 LITTLENUMS (shorts, here at least). */
23452
23453 void
23454 md_number_to_chars (char * buf, valueT val, int n)
23455 {
23456 if (target_big_endian)
23457 number_to_chars_bigendian (buf, val, n);
23458 else
23459 number_to_chars_littleendian (buf, val, n);
23460 }
23461
23462 static valueT
23463 md_chars_to_number (char * buf, int n)
23464 {
23465 valueT result = 0;
23466 unsigned char * where = (unsigned char *) buf;
23467
23468 if (target_big_endian)
23469 {
23470 while (n--)
23471 {
23472 result <<= 8;
23473 result |= (*where++ & 255);
23474 }
23475 }
23476 else
23477 {
23478 while (n--)
23479 {
23480 result <<= 8;
23481 result |= (where[n] & 255);
23482 }
23483 }
23484
23485 return result;
23486 }
23487
23488 /* MD interface: Sections. */
23489
23490 /* Calculate the maximum variable size (i.e., excluding fr_fix)
23491 that an rs_machine_dependent frag may reach. */
23492
23493 unsigned int
23494 arm_frag_max_var (fragS *fragp)
23495 {
23496 /* We only use rs_machine_dependent for variable-size Thumb instructions,
23497 which are either THUMB_SIZE (2) or INSN_SIZE (4).
23498
23499 Note that we generate relaxable instructions even for cases that don't
23500 really need it, like an immediate that's a trivial constant. So we're
23501 overestimating the instruction size for some of those cases. Rather
23502 than putting more intelligence here, it would probably be better to
23503 avoid generating a relaxation frag in the first place when it can be
23504 determined up front that a short instruction will suffice. */
23505
23506 gas_assert (fragp->fr_type == rs_machine_dependent);
23507 return INSN_SIZE;
23508 }
23509
23510 /* Estimate the size of a frag before relaxing. Assume everything fits in
23511 2 bytes. */
23512
23513 int
23514 md_estimate_size_before_relax (fragS * fragp,
23515 segT segtype ATTRIBUTE_UNUSED)
23516 {
23517 fragp->fr_var = 2;
23518 return 2;
23519 }
23520
23521 /* Convert a machine dependent frag. */
23522
void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The instruction to finalise sits at the end of the fixed part of
     the frag; it currently holds the narrow (16-bit) encoding chosen
     at assembly time.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  old_op = bfd_get_16(abfd, buf);
  /* Build the expression for the fixup we emit below: symbol + offset
     when the frag references a symbol, otherwise a plain constant.  */
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  /* fr_subtype holds the T_MNEM_* opcode recorded when the relaxable
     frag was created.  For each opcode: if relaxation settled on a
     4-byte frag (fr_var == 4), rewrite the instruction as its 32-bit
     Thumb-2 encoding (copying the register fields out of OLD_OP) and
     pick the matching 32-bit reloc; otherwise keep the 16-bit encoding
     and pick the narrow reloc.  */
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* 16-bit forms whose top nibble is 4 or 9 keep the transfer
	     register in bits 8..10; the others keep it in bits 0..2
	     with the base register in bits 3..5.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the PC-relative literal load form is PC-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Narrow adr reads the PC as insn address + 4; fold that into
	     the addend here.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs carry the destination in bits 8..10 of OLD_OP and
	     place it at bit 8 of the wide encoding; cmp/cmn place the
	     operand register at bit 16 (shift of 8 more).  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Carry the condition field across to the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 distinguishes the flag-setting forms; they take the
	     add-immediate reloc, the others the plain T32 immediate.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  /* Emit the fixup against the final encoding, crediting it to the
     source location recorded in the frag.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  /* The variable part is now fixed; fold it into the frag.  */
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
23694
23695 /* Return the size of a relaxable immediate operand instruction.
23696 SHIFT and SIZE specify the form of the allowable immediate. */
23697 static int
23698 relax_immediate (fragS *fragp, int size, int shift)
23699 {
23700 offsetT offset;
23701 offsetT mask;
23702 offsetT low;
23703
23704 /* ??? Should be able to do better than this. */
23705 if (fragp->fr_symbol)
23706 return 4;
23707
23708 low = (1 << shift) - 1;
23709 mask = (1 << (shift + size)) - (1 << shift);
23710 offset = fragp->fr_offset;
23711 /* Force misaligned offsets to 32-bit variant. */
23712 if (offset & low)
23713 return 4;
23714 if (offset & ~mask)
23715 return 4;
23716 return 2;
23717 }
23718
23719 /* Get the address of a symbol during relaxation. */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  /* Absolute-section symbols must live in the zero-address frag.  */
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* An alignment frag absorbs part of the stretch: round
		 it toward zero to a multiple of the alignment.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Nothing left to propagate past this alignment.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* F == NULL means the symbol's frag precedes FRAGP, so it has
	 already been placed on this pass and needs no adjustment.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
23768
23769 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
23770 load. */
23771 static int
23772 relax_adr (fragS *fragp, asection *sec, long stretch)
23773 {
23774 addressT addr;
23775 offsetT val;
23776
23777 /* Assume worst case for symbols not known to be in the same section. */
23778 if (fragp->fr_symbol == NULL
23779 || !S_IS_DEFINED (fragp->fr_symbol)
23780 || sec != S_GET_SEGMENT (fragp->fr_symbol)
23781 || S_IS_WEAK (fragp->fr_symbol))
23782 return 4;
23783
23784 val = relaxed_symbol_addr (fragp, stretch);
23785 addr = fragp->fr_address + fragp->fr_fix;
23786 addr = (addr + 4) & ~3;
23787 /* Force misaligned targets to 32-bit variant. */
23788 if (val & 3)
23789 return 4;
23790 val -= addr;
23791 if (val < 0 || val > 1020)
23792 return 4;
23793 return 2;
23794 }
23795
23796 /* Return the size of a relaxable add/sub immediate instruction. */
23797 static int
23798 relax_addsub (fragS *fragp, asection *sec)
23799 {
23800 char *buf;
23801 int op;
23802
23803 buf = fragp->fr_literal + fragp->fr_fix;
23804 op = bfd_get_16(sec->owner, buf);
23805 if ((op & 0xf) == ((op >> 4) & 0xf))
23806 return relax_immediate (fragp, 8, 0);
23807 else
23808 return relax_immediate (fragp, 3, 0);
23809 }
23810
23811 /* Return TRUE iff the definition of symbol S could be pre-empted
23812 (overridden) at link or load time. */
23813 static bfd_boolean
23814 symbol_preemptible (symbolS *s)
23815 {
23816 /* Weak symbols can always be pre-empted. */
23817 if (S_IS_WEAK (s))
23818 return TRUE;
23819
23820 /* Non-global symbols cannot be pre-empted. */
23821 if (! S_IS_EXTERNAL (s))
23822 return FALSE;
23823
23824 #ifdef OBJ_ELF
23825 /* In ELF, a global symbol can be marked protected, or private. In that
23826 case it can't be pre-empted (other definitions in the same link unit
23827 would violate the ODR). */
23828 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
23829 return FALSE;
23830 #endif
23831
23832 /* Other global symbols might be pre-empted. */
23833 return TRUE;
23834 }
23835
23836 /* Return the size of a relaxable branch instruction. BITS is the
23837 size of the offset field in the narrow instruction. */
23838
23839 static int
23840 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
23841 {
23842 addressT addr;
23843 offsetT val;
23844 offsetT limit;
23845
23846 /* Assume worst case for symbols not known to be in the same section. */
23847 if (!S_IS_DEFINED (fragp->fr_symbol)
23848 || sec != S_GET_SEGMENT (fragp->fr_symbol)
23849 || S_IS_WEAK (fragp->fr_symbol))
23850 return 4;
23851
23852 #ifdef OBJ_ELF
23853 /* A branch to a function in ARM state will require interworking. */
23854 if (S_IS_DEFINED (fragp->fr_symbol)
23855 && ARM_IS_FUNC (fragp->fr_symbol))
23856 return 4;
23857 #endif
23858
23859 if (symbol_preemptible (fragp->fr_symbol))
23860 return 4;
23861
23862 val = relaxed_symbol_addr (fragp, stretch);
23863 addr = fragp->fr_address + fragp->fr_fix + 4;
23864 val -= addr;
23865
23866 /* Offset is a signed value *2 */
23867 limit = 1 << bits;
23868 if (val >= limit || val < -limit)
23869 return 4;
23870 return 2;
23871 }
23872
23873
23874 /* Relax a machine dependent frag. This returns the amount by which
23875 the current size of the frag should change. */
23876
23877 int
23878 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
23879 {
23880 int oldsize;
23881 int newsize;
23882
23883 oldsize = fragp->fr_var;
23884 switch (fragp->fr_subtype)
23885 {
23886 case T_MNEM_ldr_pc2:
23887 newsize = relax_adr (fragp, sec, stretch);
23888 break;
23889 case T_MNEM_ldr_pc:
23890 case T_MNEM_ldr_sp:
23891 case T_MNEM_str_sp:
23892 newsize = relax_immediate (fragp, 8, 2);
23893 break;
23894 case T_MNEM_ldr:
23895 case T_MNEM_str:
23896 newsize = relax_immediate (fragp, 5, 2);
23897 break;
23898 case T_MNEM_ldrh:
23899 case T_MNEM_strh:
23900 newsize = relax_immediate (fragp, 5, 1);
23901 break;
23902 case T_MNEM_ldrb:
23903 case T_MNEM_strb:
23904 newsize = relax_immediate (fragp, 5, 0);
23905 break;
23906 case T_MNEM_adr:
23907 newsize = relax_adr (fragp, sec, stretch);
23908 break;
23909 case T_MNEM_mov:
23910 case T_MNEM_movs:
23911 case T_MNEM_cmp:
23912 case T_MNEM_cmn:
23913 newsize = relax_immediate (fragp, 8, 0);
23914 break;
23915 case T_MNEM_b:
23916 newsize = relax_branch (fragp, sec, 11, stretch);
23917 break;
23918 case T_MNEM_bcond:
23919 newsize = relax_branch (fragp, sec, 8, stretch);
23920 break;
23921 case T_MNEM_add_sp:
23922 case T_MNEM_add_pc:
23923 newsize = relax_immediate (fragp, 8, 2);
23924 break;
23925 case T_MNEM_inc_sp:
23926 case T_MNEM_dec_sp:
23927 newsize = relax_immediate (fragp, 7, 2);
23928 break;
23929 case T_MNEM_addi:
23930 case T_MNEM_addis:
23931 case T_MNEM_subi:
23932 case T_MNEM_subis:
23933 newsize = relax_addsub (fragp, sec);
23934 break;
23935 default:
23936 abort ();
23937 }
23938
23939 fragp->fr_var = newsize;
23940 /* Freeze wide instructions that are at or before the same location as
23941 in the previous pass. This avoids infinite loops.
23942 Don't freeze them unconditionally because targets may be artificially
23943 misaligned by the expansion of preceding frags. */
23944 if (stretch <= 0 && newsize > 2)
23945 {
23946 md_convert_frag (sec->owner, sec, fragp);
23947 frag_wane (fragp);
23948 }
23949
23950 return newsize - oldsize;
23951 }
23952
23953 /* Round up a section size to the appropriate boundary. */
23954
23955 valueT
23956 md_section_align (segT segment ATTRIBUTE_UNUSED,
23957 valueT size)
23958 {
23959 return size;
23960 }
23961
23962 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
23963 of an rs_align_code fragment. */
23964
void
arm_handle_align (fragS * fragP)
{
  /* Canned no-op encodings, indexed as [arch variant][endianness].  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed to reach the next frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* Only up to MAX_MEM_FOR_RS_ALIGN_CODE bytes of fill are reserved in
     the frag; larger requests wrap to the residue.  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  /* Choose the no-op pattern from the frag's recorded ARM/Thumb mode
     and the selected (or default) architecture.  */
  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Pad any leading misalignment with zero bytes (marked as data for
     ELF mapping symbols) so the no-ops start on a no-op boundary.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      /* An odd halfword of padding takes one narrow no-op.  */
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  /* Fill the rest with full-size no-ops.  */
  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
24081
24082 /* Called from md_do_align. Used to create an alignment
24083 frag in a code section. */
24084
24085 void
24086 arm_frag_align_code (int n, int max)
24087 {
24088 char * p;
24089
24090 /* We assume that there will never be a requirement
24091 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
24092 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
24093 {
24094 char err_msg[128];
24095
24096 sprintf (err_msg,
24097 _("alignments greater than %d bytes not supported in .text sections."),
24098 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
24099 as_fatal ("%s", err_msg);
24100 }
24101
24102 p = frag_var (rs_align_code,
24103 MAX_MEM_FOR_RS_ALIGN_CODE,
24104 1,
24105 (relax_substateT) max,
24106 (symbolS *) NULL,
24107 (offsetT) n,
24108 (char *) NULL);
24109 *p = 0;
24110 }
24111
24112 /* Perform target specific initialisation of a frag.
24113 Note - despite the name this initialisation is not done when the frag
24114 is created, but only when its type is assigned. A frag can be created
24115 and used a long time before its type is set, so beware of assuming that
24116 this initialisation is performed first. */
24117
24118 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  The
     MODE_RECORDED flag marks the field as valid for later readers
     (e.g. arm_handle_align).  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
24125
24126 #else /* OBJ_ELF is defined. */
24127 void
24128 arm_init_frag (fragS * fragP, int max_chars)
24129 {
24130 bfd_boolean frag_thumb_mode;
24131
24132 /* If the current ARM vs THUMB mode has not already
24133 been recorded into this frag then do so now. */
24134 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
24135 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
24136
24137 /* PR 21809: Do not set a mapping state for debug sections
24138 - it just confuses other tools. */
24139 if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
24140 return;
24141
24142 frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;
24143
24144 /* Record a mapping symbol for alignment frags. We will delete this
24145 later if the alignment ends up empty. */
24146 switch (fragP->fr_type)
24147 {
24148 case rs_align:
24149 case rs_align_test:
24150 case rs_fill:
24151 mapping_state_2 (MAP_DATA, max_chars);
24152 break;
24153 case rs_align_code:
24154 mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
24155 break;
24156 default:
24157 break;
24158 }
24159 }
24160
24161 /* When we change sections we need to issue a new mapping symbol. */
24162
24163 void
24164 arm_elf_change_section (void)
24165 {
24166 /* Link an unlinked unwind index table section to the .text section. */
24167 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
24168 && elf_linked_to_section (now_seg) == NULL)
24169 elf_linked_to_section (now_seg) = text_section;
24170 }
24171
24172 int
24173 arm_elf_section_type (const char * str, size_t len)
24174 {
24175 if (len == 5 && strncmp (str, "exidx", 5) == 0)
24176 return SHT_ARM_EXIDX;
24177
24178 return -1;
24179 }
24180 \f
24181 /* Code to deal with unwinding tables. */
24182
24183 static void add_unwind_adjustsp (offsetT);
24184
24185 /* Generate any deferred unwind frame offset. */
24186
24187 static void
24188 flush_pending_unwind (void)
24189 {
24190 offsetT offset;
24191
24192 offset = unwind.pending_offset;
24193 unwind.pending_offset = 0;
24194 if (offset != 0)
24195 add_unwind_adjustsp (offset);
24196 }
24197
24198 /* Add an opcode to this list for this function. Two-byte opcodes should
24199 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
24200 order. */
24201
24202 static void
24203 add_unwind_opcode (valueT op, int length)
24204 {
24205 /* Add any deferred stack adjustment. */
24206 if (unwind.pending_offset)
24207 flush_pending_unwind ();
24208
24209 unwind.sp_restored = 0;
24210
24211 if (unwind.opcode_count + length > unwind.opcode_alloc)
24212 {
24213 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
24214 if (unwind.opcodes)
24215 unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
24216 unwind.opcode_alloc);
24217 else
24218 unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
24219 }
24220 while (length > 0)
24221 {
24222 length--;
24223 unwind.opcodes[unwind.opcode_count] = op & 0xff;
24224 op >>= 8;
24225 unwind.opcode_count++;
24226 }
24227 }
24228
24229 /* Add unwind opcodes to adjust the stack pointer. */
24230
24231 static void
24232 add_unwind_adjustsp (offsetT offset)
24233 {
24234 valueT op;
24235
24236 if (offset > 0x200)
24237 {
24238 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
24239 char bytes[5];
24240 int n;
24241 valueT o;
24242
24243 /* Long form: 0xb2, uleb128. */
24244 /* This might not fit in a word so add the individual bytes,
24245 remembering the list is built in reverse order. */
24246 o = (valueT) ((offset - 0x204) >> 2);
24247 if (o == 0)
24248 add_unwind_opcode (0, 1);
24249
24250 /* Calculate the uleb128 encoding of the offset. */
24251 n = 0;
24252 while (o)
24253 {
24254 bytes[n] = o & 0x7f;
24255 o >>= 7;
24256 if (o)
24257 bytes[n] |= 0x80;
24258 n++;
24259 }
24260 /* Add the insn. */
24261 for (; n; n--)
24262 add_unwind_opcode (bytes[n - 1], 1);
24263 add_unwind_opcode (0xb2, 1);
24264 }
24265 else if (offset > 0x100)
24266 {
24267 /* Two short opcodes. */
24268 add_unwind_opcode (0x3f, 1);
24269 op = (offset - 0x104) >> 2;
24270 add_unwind_opcode (op, 1);
24271 }
24272 else if (offset > 0)
24273 {
24274 /* Short opcode. */
24275 op = (offset - 4) >> 2;
24276 add_unwind_opcode (op, 1);
24277 }
24278 else if (offset < 0)
24279 {
24280 offset = -offset;
24281 while (offset > 0x100)
24282 {
24283 add_unwind_opcode (0x7f, 1);
24284 offset -= 0x100;
24285 }
24286 op = ((offset - 4) >> 2) | 0x40;
24287 add_unwind_opcode (op, 1);
24288 }
24289 }
24290
24291 /* Finish the list of unwind opcodes for this function. */
24292
24293 static void
24294 finish_unwind_opcodes (void)
24295 {
24296 valueT op;
24297
24298 if (unwind.fp_used)
24299 {
24300 /* Adjust sp as necessary. */
24301 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
24302 flush_pending_unwind ();
24303
24304 /* After restoring sp from the frame pointer. */
24305 op = 0x90 | unwind.fp_reg;
24306 add_unwind_opcode (op, 1);
24307 }
24308 else
24309 flush_pending_unwind ();
24310 }
24311
24312
24313 /* Start an exception table entry. If idx is nonzero this is an index table
24314 entry. */
24315
24316 static void
24317 start_unwind_section (const segT text_seg, int idx)
24318 {
24319 const char * text_name;
24320 const char * prefix;
24321 const char * prefix_once;
24322 const char * group_name;
24323 char * sec_name;
24324 int type;
24325 int flags;
24326 int linkonce;
24327
24328 if (idx)
24329 {
24330 prefix = ELF_STRING_ARM_unwind;
24331 prefix_once = ELF_STRING_ARM_unwind_once;
24332 type = SHT_ARM_EXIDX;
24333 }
24334 else
24335 {
24336 prefix = ELF_STRING_ARM_unwind_info;
24337 prefix_once = ELF_STRING_ARM_unwind_info_once;
24338 type = SHT_PROGBITS;
24339 }
24340
24341 text_name = segment_name (text_seg);
24342 if (streq (text_name, ".text"))
24343 text_name = "";
24344
24345 if (strncmp (text_name, ".gnu.linkonce.t.",
24346 strlen (".gnu.linkonce.t.")) == 0)
24347 {
24348 prefix = prefix_once;
24349 text_name += strlen (".gnu.linkonce.t.");
24350 }
24351
24352 sec_name = concat (prefix, text_name, (char *) NULL);
24353
24354 flags = SHF_ALLOC;
24355 linkonce = 0;
24356 group_name = 0;
24357
24358 /* Handle COMDAT group. */
24359 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
24360 {
24361 group_name = elf_group_name (text_seg);
24362 if (group_name == NULL)
24363 {
24364 as_bad (_("Group section `%s' has no group signature"),
24365 segment_name (text_seg));
24366 ignore_rest_of_line ();
24367 return;
24368 }
24369 flags |= SHF_GROUP;
24370 linkonce = 1;
24371 }
24372
24373 obj_elf_change_section (sec_name, type, 0, flags, 0, group_name,
24374 linkonce, 0);
24375
24376 /* Set the section link for index tables. */
24377 if (idx)
24378 elf_linked_to_section (now_seg) = text_seg;
24379 }
24380
24381
24382 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
24383 personality routine data. Returns zero, or the index table value for
24384 an inline entry. */
24385
static valueT
create_unwind_entry (int have_data)
{
  /* Number of 32-bit words of extab data (excluding the first word).  */
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  /* Flush any pending sp adjustment and frame-pointer restore opcode.  */
  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  /* Switch to the unwind data (.ARM.extab style) section.  */
  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      /* personality_index -2 means .cantunwind was given.  */
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.
	 Routine 0 can pack at most three opcode bytes inline.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table: pack up to
		 three opcodes after the 0x80 marker byte and return
		 the word for the caller to store in the index.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round byte count up to a number of additional 32-bit words; the
     single-byte size field limits it to 255.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* Table entries are 4-byte aligned.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine: first word is a PREL31 reference
	 to the routine, followed by the size/opcode words.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  /* Zero tells the caller the entry is in the extab section, not
     inline in the index table.  */
  return 0;
}
24550
24551
24552 /* Initialize the DWARF-2 unwind information for this procedure. */
24553
void
tc_arm_frame_initial_instructions (void)
{
  /* On function entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
24559 #endif /* OBJ_ELF */
24560
24561 /* Convert REGNAME to a DWARF-2 register number. */
24562
24563 int
24564 tc_arm_regname_to_dw2regnum (char *regname)
24565 {
24566 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
24567 if (reg != FAIL)
24568 return reg;
24569
24570 /* PR 16694: Allow VFP registers as well. */
24571 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
24572 if (reg != FAIL)
24573 return 64 + reg;
24574
24575 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
24576 if (reg != FAIL)
24577 return reg + 256;
24578
24579 return FAIL;
24580 }
24581
24582 #ifdef TE_PE
24583 void
24584 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
24585 {
24586 expressionS exp;
24587
24588 exp.X_op = O_secrel;
24589 exp.X_add_symbol = symbol;
24590 exp.X_add_number = 0;
24591 emit_expr (&exp, size);
24592 }
24593 #endif
24594
24595 /* MD interface: Symbol and relocation handling. */
24596
24597 /* Return the address within the segment that a PC-relative fixup is
24598 relative to. For ARM, PC-relative fixups applied to instructions
24599 are generally relative to the location of the fixup plus 8 bytes.
24600 Thumb branches are offset by 4, and Thumb loads relative to PC
24601 require special handling. */
24602
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* NOTE(review): when the target is a local ARM function on a
	 v5T+ core, the full base is restored here — presumably because
	 the fixup will be resolved locally (BL converted for
	 interworking) rather than left to the linker; confirm against
	 md_apply_fix.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
24730
24731 static bfd_boolean flag_warn_syms = TRUE;
24732
24733 bfd_boolean
24734 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
24735 {
24736 /* PR 18347 - Warn if the user attempts to create a symbol with the same
24737 name as an ARM instruction. Whilst strictly speaking it is allowed, it
24738 does mean that the resulting code might be very confusing to the reader.
24739 Also this warning can be triggered if the user omits an operand before
24740 an immediate address, eg:
24741
24742 LDR =foo
24743
24744 GAS treats this as an assignment of the value of the symbol foo to a
24745 symbol LDR, and so (without this code) it will not issue any kind of
24746 warning or error message.
24747
24748 Note - ARM instructions are case-insensitive but the strings in the hash
24749 table are all stored in lower case, so we must first ensure that name is
24750 lower case too. */
24751 if (flag_warn_syms && arm_ops_hsh)
24752 {
24753 char * nbuf = strdup (name);
24754 char * p;
24755
24756 for (p = nbuf; *p; p++)
24757 *p = TOLOWER (*p);
24758 if (hash_find (arm_ops_hsh, nbuf) != NULL)
24759 {
24760 static struct hash_control * already_warned = NULL;
24761
24762 if (already_warned == NULL)
24763 already_warned = hash_new ();
24764 /* Only warn about the symbol once. To keep the code
24765 simple we let hash_insert do the lookup for us. */
24766 if (hash_insert (already_warned, nbuf, NULL) == NULL)
24767 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
24768 }
24769 else
24770 free (nbuf);
24771 }
24772
24773 return FALSE;
24774 }
24775
24776 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
24777 Otherwise we have no need to default values of symbols. */
24778
24779 symbolS *
24780 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
24781 {
24782 #ifdef OBJ_ELF
24783 if (name[0] == '_' && name[1] == 'G'
24784 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
24785 {
24786 if (!GOT_symbol)
24787 {
24788 if (symbol_find (name))
24789 as_bad (_("GOT already in the symbol table"));
24790
24791 GOT_symbol = symbol_new (name, undefined_section,
24792 (valueT) 0, & zero_address_frag);
24793 }
24794
24795 return GOT_symbol;
24796 }
24797 #endif
24798
24799 return NULL;
24800 }
24801
24802 /* Subroutine of md_apply_fix. Check to see if an immediate can be
24803 computed as two separate immediate values, added together. We
24804 already know that this value cannot be computed by just one ARM
24805 instruction. */
24806
24807 static unsigned int
24808 validate_immediate_twopart (unsigned int val,
24809 unsigned int * highpart)
24810 {
24811 unsigned int a;
24812 unsigned int i;
24813
24814 for (i = 0; i < 32; i += 2)
24815 if (((a = rotate_left (val, i)) & 0xff) != 0)
24816 {
24817 if (a & 0xff00)
24818 {
24819 if (a & ~ 0xffff)
24820 continue;
24821 * highpart = (a >> 8) | ((i + 24) << 7);
24822 }
24823 else if (a & 0xff0000)
24824 {
24825 if (a & 0xff000000)
24826 continue;
24827 * highpart = (a >> 16) | ((i + 16) << 7);
24828 }
24829 else
24830 {
24831 gas_assert (a & 0xff000000);
24832 * highpart = (a >> 24) | ((i + 8) << 7);
24833 }
24834
24835 return (a & 0xff) | (i << 7);
24836 }
24837
24838 return FAIL;
24839 }
24840
24841 static int
24842 validate_offset_imm (unsigned int val, int hwse)
24843 {
24844 if ((hwse && val > 255) || val > 4095)
24845 return FAIL;
24846 return val;
24847 }
24848
24849 /* Subroutine of md_apply_fix. Do those data_ops which can take a
24850 negative immediate constant by altering the instruction. A bit of
24851 a hack really.
24852 MOV <-> MVN
24853 AND <-> BIC
24854 ADC <-> SBC
24855 by inverting the second operand, and
24856 ADD <-> SUB
24857 CMP <-> CMN
24858 by negating the second operand. */
24859
24860 static int
24861 negate_data_op (unsigned long * instruction,
24862 unsigned long value)
24863 {
24864 int op, new_inst;
24865 unsigned long negated, inverted;
24866
24867 negated = encode_arm_immediate (-value);
24868 inverted = encode_arm_immediate (~value);
24869
24870 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
24871 switch (op)
24872 {
24873 /* First negates. */
24874 case OPCODE_SUB: /* ADD <-> SUB */
24875 new_inst = OPCODE_ADD;
24876 value = negated;
24877 break;
24878
24879 case OPCODE_ADD:
24880 new_inst = OPCODE_SUB;
24881 value = negated;
24882 break;
24883
24884 case OPCODE_CMP: /* CMP <-> CMN */
24885 new_inst = OPCODE_CMN;
24886 value = negated;
24887 break;
24888
24889 case OPCODE_CMN:
24890 new_inst = OPCODE_CMP;
24891 value = negated;
24892 break;
24893
24894 /* Now Inverted ops. */
24895 case OPCODE_MOV: /* MOV <-> MVN */
24896 new_inst = OPCODE_MVN;
24897 value = inverted;
24898 break;
24899
24900 case OPCODE_MVN:
24901 new_inst = OPCODE_MOV;
24902 value = inverted;
24903 break;
24904
24905 case OPCODE_AND: /* AND <-> BIC */
24906 new_inst = OPCODE_BIC;
24907 value = inverted;
24908 break;
24909
24910 case OPCODE_BIC:
24911 new_inst = OPCODE_AND;
24912 value = inverted;
24913 break;
24914
24915 case OPCODE_ADC: /* ADC <-> SBC */
24916 new_inst = OPCODE_SBC;
24917 value = inverted;
24918 break;
24919
24920 case OPCODE_SBC:
24921 new_inst = OPCODE_ADC;
24922 value = inverted;
24923 break;
24924
24925 /* We cannot do anything. */
24926 default:
24927 return FAIL;
24928 }
24929
24930 if (value == (unsigned) FAIL)
24931 return FAIL;
24932
24933 *instruction &= OPCODE_MASK;
24934 *instruction |= new_inst << DATA_OP_SHIFT;
24935 return value;
24936 }
24937
24938 /* Like negate_data_op, but for Thumb-2. */
24939
static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  /* Pre-compute both candidate immediates; only one is used,
     depending on the opcode.  */
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* rd == 15 means this is really TST (AND with the result
	 discarded), which cannot be rewritten.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  /* The substituted immediate may itself be unencodable.  */
  if (value == (unsigned int)FAIL)
    return FAIL;

  /* Patch the opcode field in place and hand back the new immediate.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
25013
25014 /* Read a 32-bit thumb instruction from buf. */
25015
25016 static unsigned long
25017 get_thumb32_insn (char * buf)
25018 {
25019 unsigned long insn;
25020 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
25021 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25022
25023 return insn;
25024 }
25025
25026 /* We usually want to set the low bit on the address of thumb function
25027 symbols. In particular .word foo - . should have the low bit set.
25028 Generic code tries to fold the difference of two symbols to
25029 a constant. Prevent this and force a relocation when the first symbols
25030 is a thumb function. */
25031
25032 bfd_boolean
25033 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
25034 {
25035 if (op == O_subtract
25036 && l->X_op == O_symbol
25037 && r->X_op == O_symbol
25038 && THUMB_IS_FUNC (l->X_add_symbol))
25039 {
25040 l->X_op = O_subtract;
25041 l->X_op_symbol = r->X_add_symbol;
25042 l->X_add_number -= r->X_add_number;
25043 return TRUE;
25044 }
25045
25046 /* Process as normal. */
25047 return FALSE;
25048 }
25049
25050 /* Encode Thumb2 unconditional branches and calls. The encoding
25051 for the 2 are identical for the immediate values. */
25052
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
#define T2I1I2MASK ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Split the branch offset into the fields of the T2 B/BL encoding:
     sign bit S, the two high offset bits I1/I2, a 10-bit high part and
     an 11-bit low part.  Bit 0 of the offset is dropped because Thumb
     instructions are halfword aligned.  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  /* Clear J1/J2 before inserting the new bits.  */
  newval2 &= ~T2I1I2MASK;
  /* The encoding stores J1 = NOT(I1 XOR S) and J2 = NOT(I2 XOR S);
     the final XOR with T2I1I2MASK performs the NOT on both bits.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
25074
25075 void
25076 md_apply_fix (fixS * fixP,
25077 valueT * valP,
25078 segT seg)
25079 {
25080 offsetT value = * valP;
25081 offsetT newval;
25082 unsigned int newimm;
25083 unsigned long temp;
25084 int sign;
25085 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
25086
25087 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
25088
25089 /* Note whether this will delete the relocation. */
25090
25091 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
25092 fixP->fx_done = 1;
25093
25094 /* On a 64-bit host, silently truncate 'value' to 32 bits for
25095 consistency with the behaviour on 32-bit hosts. Remember value
25096 for emit_reloc. */
25097 value &= 0xffffffff;
25098 value ^= 0x80000000;
25099 value -= 0x80000000;
25100
25101 *valP = value;
25102 fixP->fx_addnumber = value;
25103
25104 /* Same treatment for fixP->fx_offset. */
25105 fixP->fx_offset &= 0xffffffff;
25106 fixP->fx_offset ^= 0x80000000;
25107 fixP->fx_offset -= 0x80000000;
25108
25109 switch (fixP->fx_r_type)
25110 {
25111 case BFD_RELOC_NONE:
25112 /* This will need to go in the object file. */
25113 fixP->fx_done = 0;
25114 break;
25115
25116 case BFD_RELOC_ARM_IMMEDIATE:
25117 /* We claim that this fixup has been processed here,
25118 even if in fact we generate an error because we do
25119 not have a reloc for it, so tc_gen_reloc will reject it. */
25120 fixP->fx_done = 1;
25121
25122 if (fixP->fx_addsy)
25123 {
25124 const char *msg = 0;
25125
25126 if (! S_IS_DEFINED (fixP->fx_addsy))
25127 msg = _("undefined symbol %s used as an immediate value");
25128 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
25129 msg = _("symbol %s is in a different section");
25130 else if (S_IS_WEAK (fixP->fx_addsy))
25131 msg = _("symbol %s is weak and may be overridden later");
25132
25133 if (msg)
25134 {
25135 as_bad_where (fixP->fx_file, fixP->fx_line,
25136 msg, S_GET_NAME (fixP->fx_addsy));
25137 break;
25138 }
25139 }
25140
25141 temp = md_chars_to_number (buf, INSN_SIZE);
25142
25143 /* If the offset is negative, we should use encoding A2 for ADR. */
25144 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
25145 newimm = negate_data_op (&temp, value);
25146 else
25147 {
25148 newimm = encode_arm_immediate (value);
25149
25150 /* If the instruction will fail, see if we can fix things up by
25151 changing the opcode. */
25152 if (newimm == (unsigned int) FAIL)
25153 newimm = negate_data_op (&temp, value);
25154 /* MOV accepts both ARM modified immediate (A1 encoding) and
25155 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
25156 When disassembling, MOV is preferred when there is no encoding
25157 overlap. */
25158 if (newimm == (unsigned int) FAIL
25159 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
25160 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
25161 && !((temp >> SBIT_SHIFT) & 0x1)
25162 && value >= 0 && value <= 0xffff)
25163 {
25164 /* Clear bits[23:20] to change encoding from A1 to A2. */
25165 temp &= 0xff0fffff;
25166 /* Encoding high 4bits imm. Code below will encode the remaining
25167 low 12bits. */
25168 temp |= (value & 0x0000f000) << 4;
25169 newimm = value & 0x00000fff;
25170 }
25171 }
25172
25173 if (newimm == (unsigned int) FAIL)
25174 {
25175 as_bad_where (fixP->fx_file, fixP->fx_line,
25176 _("invalid constant (%lx) after fixup"),
25177 (unsigned long) value);
25178 break;
25179 }
25180
25181 newimm |= (temp & 0xfffff000);
25182 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
25183 break;
25184
25185 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
25186 {
25187 unsigned int highpart = 0;
25188 unsigned int newinsn = 0xe1a00000; /* nop. */
25189
25190 if (fixP->fx_addsy)
25191 {
25192 const char *msg = 0;
25193
25194 if (! S_IS_DEFINED (fixP->fx_addsy))
25195 msg = _("undefined symbol %s used as an immediate value");
25196 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
25197 msg = _("symbol %s is in a different section");
25198 else if (S_IS_WEAK (fixP->fx_addsy))
25199 msg = _("symbol %s is weak and may be overridden later");
25200
25201 if (msg)
25202 {
25203 as_bad_where (fixP->fx_file, fixP->fx_line,
25204 msg, S_GET_NAME (fixP->fx_addsy));
25205 break;
25206 }
25207 }
25208
25209 newimm = encode_arm_immediate (value);
25210 temp = md_chars_to_number (buf, INSN_SIZE);
25211
25212 /* If the instruction will fail, see if we can fix things up by
25213 changing the opcode. */
25214 if (newimm == (unsigned int) FAIL
25215 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
25216 {
25217 /* No ? OK - try using two ADD instructions to generate
25218 the value. */
25219 newimm = validate_immediate_twopart (value, & highpart);
25220
25221 /* Yes - then make sure that the second instruction is
25222 also an add. */
25223 if (newimm != (unsigned int) FAIL)
25224 newinsn = temp;
25225 /* Still No ? Try using a negated value. */
25226 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
25227 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
25228 /* Otherwise - give up. */
25229 else
25230 {
25231 as_bad_where (fixP->fx_file, fixP->fx_line,
25232 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
25233 (long) value);
25234 break;
25235 }
25236
25237 /* Replace the first operand in the 2nd instruction (which
25238 is the PC) with the destination register. We have
25239 already added in the PC in the first instruction and we
25240 do not want to do it again. */
25241 newinsn &= ~ 0xf0000;
25242 newinsn |= ((newinsn & 0x0f000) << 4);
25243 }
25244
25245 newimm |= (temp & 0xfffff000);
25246 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
25247
25248 highpart |= (newinsn & 0xfffff000);
25249 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
25250 }
25251 break;
25252
25253 case BFD_RELOC_ARM_OFFSET_IMM:
25254 if (!fixP->fx_done && seg->use_rela_p)
25255 value = 0;
25256 /* Fall through. */
25257
25258 case BFD_RELOC_ARM_LITERAL:
25259 sign = value > 0;
25260
25261 if (value < 0)
25262 value = - value;
25263
25264 if (validate_offset_imm (value, 0) == FAIL)
25265 {
25266 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
25267 as_bad_where (fixP->fx_file, fixP->fx_line,
25268 _("invalid literal constant: pool needs to be closer"));
25269 else
25270 as_bad_where (fixP->fx_file, fixP->fx_line,
25271 _("bad immediate value for offset (%ld)"),
25272 (long) value);
25273 break;
25274 }
25275
25276 newval = md_chars_to_number (buf, INSN_SIZE);
25277 if (value == 0)
25278 newval &= 0xfffff000;
25279 else
25280 {
25281 newval &= 0xff7ff000;
25282 newval |= value | (sign ? INDEX_UP : 0);
25283 }
25284 md_number_to_chars (buf, newval, INSN_SIZE);
25285 break;
25286
25287 case BFD_RELOC_ARM_OFFSET_IMM8:
25288 case BFD_RELOC_ARM_HWLITERAL:
25289 sign = value > 0;
25290
25291 if (value < 0)
25292 value = - value;
25293
25294 if (validate_offset_imm (value, 1) == FAIL)
25295 {
25296 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
25297 as_bad_where (fixP->fx_file, fixP->fx_line,
25298 _("invalid literal constant: pool needs to be closer"));
25299 else
25300 as_bad_where (fixP->fx_file, fixP->fx_line,
25301 _("bad immediate value for 8-bit offset (%ld)"),
25302 (long) value);
25303 break;
25304 }
25305
25306 newval = md_chars_to_number (buf, INSN_SIZE);
25307 if (value == 0)
25308 newval &= 0xfffff0f0;
25309 else
25310 {
25311 newval &= 0xff7ff0f0;
25312 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
25313 }
25314 md_number_to_chars (buf, newval, INSN_SIZE);
25315 break;
25316
25317 case BFD_RELOC_ARM_T32_OFFSET_U8:
25318 if (value < 0 || value > 1020 || value % 4 != 0)
25319 as_bad_where (fixP->fx_file, fixP->fx_line,
25320 _("bad immediate value for offset (%ld)"), (long) value);
25321 value /= 4;
25322
25323 newval = md_chars_to_number (buf+2, THUMB_SIZE);
25324 newval |= value;
25325 md_number_to_chars (buf+2, newval, THUMB_SIZE);
25326 break;
25327
25328 case BFD_RELOC_ARM_T32_OFFSET_IMM:
25329 /* This is a complicated relocation used for all varieties of Thumb32
25330 load/store instruction with immediate offset:
25331
25332 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
25333 *4, optional writeback(W)
25334 (doubleword load/store)
25335
25336 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
25337 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
25338 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
25339 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
25340 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
25341
25342 Uppercase letters indicate bits that are already encoded at
25343 this point. Lowercase letters are our problem. For the
25344 second block of instructions, the secondary opcode nybble
25345 (bits 8..11) is present, and bit 23 is zero, even if this is
25346 a PC-relative operation. */
25347 newval = md_chars_to_number (buf, THUMB_SIZE);
25348 newval <<= 16;
25349 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
25350
25351 if ((newval & 0xf0000000) == 0xe0000000)
25352 {
25353 /* Doubleword load/store: 8-bit offset, scaled by 4. */
25354 if (value >= 0)
25355 newval |= (1 << 23);
25356 else
25357 value = -value;
25358 if (value % 4 != 0)
25359 {
25360 as_bad_where (fixP->fx_file, fixP->fx_line,
25361 _("offset not a multiple of 4"));
25362 break;
25363 }
25364 value /= 4;
25365 if (value > 0xff)
25366 {
25367 as_bad_where (fixP->fx_file, fixP->fx_line,
25368 _("offset out of range"));
25369 break;
25370 }
25371 newval &= ~0xff;
25372 }
25373 else if ((newval & 0x000f0000) == 0x000f0000)
25374 {
25375 /* PC-relative, 12-bit offset. */
25376 if (value >= 0)
25377 newval |= (1 << 23);
25378 else
25379 value = -value;
25380 if (value > 0xfff)
25381 {
25382 as_bad_where (fixP->fx_file, fixP->fx_line,
25383 _("offset out of range"));
25384 break;
25385 }
25386 newval &= ~0xfff;
25387 }
25388 else if ((newval & 0x00000100) == 0x00000100)
25389 {
25390 /* Writeback: 8-bit, +/- offset. */
25391 if (value >= 0)
25392 newval |= (1 << 9);
25393 else
25394 value = -value;
25395 if (value > 0xff)
25396 {
25397 as_bad_where (fixP->fx_file, fixP->fx_line,
25398 _("offset out of range"));
25399 break;
25400 }
25401 newval &= ~0xff;
25402 }
25403 else if ((newval & 0x00000f00) == 0x00000e00)
25404 {
25405 /* T-instruction: positive 8-bit offset. */
25406 if (value < 0 || value > 0xff)
25407 {
25408 as_bad_where (fixP->fx_file, fixP->fx_line,
25409 _("offset out of range"));
25410 break;
25411 }
25412 newval &= ~0xff;
25413 newval |= value;
25414 }
25415 else
25416 {
25417 /* Positive 12-bit or negative 8-bit offset. */
25418 int limit;
25419 if (value >= 0)
25420 {
25421 newval |= (1 << 23);
25422 limit = 0xfff;
25423 }
25424 else
25425 {
25426 value = -value;
25427 limit = 0xff;
25428 }
25429 if (value > limit)
25430 {
25431 as_bad_where (fixP->fx_file, fixP->fx_line,
25432 _("offset out of range"));
25433 break;
25434 }
25435 newval &= ~limit;
25436 }
25437
25438 newval |= value;
25439 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
25440 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
25441 break;
25442
25443 case BFD_RELOC_ARM_SHIFT_IMM:
25444 newval = md_chars_to_number (buf, INSN_SIZE);
25445 if (((unsigned long) value) > 32
25446 || (value == 32
25447 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
25448 {
25449 as_bad_where (fixP->fx_file, fixP->fx_line,
25450 _("shift expression is too large"));
25451 break;
25452 }
25453
25454 if (value == 0)
25455 /* Shifts of zero must be done as lsl. */
25456 newval &= ~0x60;
25457 else if (value == 32)
25458 value = 0;
25459 newval &= 0xfffff07f;
25460 newval |= (value & 0x1f) << 7;
25461 md_number_to_chars (buf, newval, INSN_SIZE);
25462 break;
25463
25464 case BFD_RELOC_ARM_T32_IMMEDIATE:
25465 case BFD_RELOC_ARM_T32_ADD_IMM:
25466 case BFD_RELOC_ARM_T32_IMM12:
25467 case BFD_RELOC_ARM_T32_ADD_PC12:
25468 /* We claim that this fixup has been processed here,
25469 even if in fact we generate an error because we do
25470 not have a reloc for it, so tc_gen_reloc will reject it. */
25471 fixP->fx_done = 1;
25472
25473 if (fixP->fx_addsy
25474 && ! S_IS_DEFINED (fixP->fx_addsy))
25475 {
25476 as_bad_where (fixP->fx_file, fixP->fx_line,
25477 _("undefined symbol %s used as an immediate value"),
25478 S_GET_NAME (fixP->fx_addsy));
25479 break;
25480 }
25481
25482 newval = md_chars_to_number (buf, THUMB_SIZE);
25483 newval <<= 16;
25484 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
25485
25486 newimm = FAIL;
25487 if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
25488 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
25489 Thumb2 modified immediate encoding (T2). */
25490 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
25491 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
25492 {
25493 newimm = encode_thumb32_immediate (value);
25494 if (newimm == (unsigned int) FAIL)
25495 newimm = thumb32_negate_data_op (&newval, value);
25496 }
25497 if (newimm == (unsigned int) FAIL)
25498 {
25499 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
25500 {
25501 /* Turn add/sum into addw/subw. */
25502 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
25503 newval = (newval & 0xfeffffff) | 0x02000000;
25504 /* No flat 12-bit imm encoding for addsw/subsw. */
25505 if ((newval & 0x00100000) == 0)
25506 {
25507 /* 12 bit immediate for addw/subw. */
25508 if (value < 0)
25509 {
25510 value = -value;
25511 newval ^= 0x00a00000;
25512 }
25513 if (value > 0xfff)
25514 newimm = (unsigned int) FAIL;
25515 else
25516 newimm = value;
25517 }
25518 }
25519 else
25520 {
25521 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
25522 UINT16 (T3 encoding), MOVW only accepts UINT16. When
25523 disassembling, MOV is preferred when there is no encoding
25524 overlap. */
25525 if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
25526 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
25527 but with the Rn field [19:16] set to 1111. */
25528 && (((newval >> 16) & 0xf) == 0xf)
25529 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
25530 && !((newval >> T2_SBIT_SHIFT) & 0x1)
25531 && value >= 0 && value <= 0xffff)
25532 {
25533 /* Toggle bit[25] to change encoding from T2 to T3. */
25534 newval ^= 1 << 25;
25535 /* Clear bits[19:16]. */
25536 newval &= 0xfff0ffff;
25537 /* Encoding high 4bits imm. Code below will encode the
25538 remaining low 12bits. */
25539 newval |= (value & 0x0000f000) << 4;
25540 newimm = value & 0x00000fff;
25541 }
25542 }
25543 }
25544
25545 if (newimm == (unsigned int)FAIL)
25546 {
25547 as_bad_where (fixP->fx_file, fixP->fx_line,
25548 _("invalid constant (%lx) after fixup"),
25549 (unsigned long) value);
25550 break;
25551 }
25552
25553 newval |= (newimm & 0x800) << 15;
25554 newval |= (newimm & 0x700) << 4;
25555 newval |= (newimm & 0x0ff);
25556
25557 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
25558 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
25559 break;
25560
25561 case BFD_RELOC_ARM_SMC:
25562 if (((unsigned long) value) > 0xffff)
25563 as_bad_where (fixP->fx_file, fixP->fx_line,
25564 _("invalid smc expression"));
25565 newval = md_chars_to_number (buf, INSN_SIZE);
25566 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
25567 md_number_to_chars (buf, newval, INSN_SIZE);
25568 break;
25569
25570 case BFD_RELOC_ARM_HVC:
25571 if (((unsigned long) value) > 0xffff)
25572 as_bad_where (fixP->fx_file, fixP->fx_line,
25573 _("invalid hvc expression"));
25574 newval = md_chars_to_number (buf, INSN_SIZE);
25575 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
25576 md_number_to_chars (buf, newval, INSN_SIZE);
25577 break;
25578
25579 case BFD_RELOC_ARM_SWI:
25580 if (fixP->tc_fix_data != 0)
25581 {
25582 if (((unsigned long) value) > 0xff)
25583 as_bad_where (fixP->fx_file, fixP->fx_line,
25584 _("invalid swi expression"));
25585 newval = md_chars_to_number (buf, THUMB_SIZE);
25586 newval |= value;
25587 md_number_to_chars (buf, newval, THUMB_SIZE);
25588 }
25589 else
25590 {
25591 if (((unsigned long) value) > 0x00ffffff)
25592 as_bad_where (fixP->fx_file, fixP->fx_line,
25593 _("invalid swi expression"));
25594 newval = md_chars_to_number (buf, INSN_SIZE);
25595 newval |= value;
25596 md_number_to_chars (buf, newval, INSN_SIZE);
25597 }
25598 break;
25599
25600 case BFD_RELOC_ARM_MULTI:
25601 if (((unsigned long) value) > 0xffff)
25602 as_bad_where (fixP->fx_file, fixP->fx_line,
25603 _("invalid expression in load/store multiple"));
25604 newval = value | md_chars_to_number (buf, INSN_SIZE);
25605 md_number_to_chars (buf, newval, INSN_SIZE);
25606 break;
25607
25608 #ifdef OBJ_ELF
25609 case BFD_RELOC_ARM_PCREL_CALL:
25610
25611 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25612 && fixP->fx_addsy
25613 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25614 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25615 && THUMB_IS_FUNC (fixP->fx_addsy))
25616 /* Flip the bl to blx. This is a simple flip
25617 bit here because we generate PCREL_CALL for
25618 unconditional bls. */
25619 {
25620 newval = md_chars_to_number (buf, INSN_SIZE);
25621 newval = newval | 0x10000000;
25622 md_number_to_chars (buf, newval, INSN_SIZE);
25623 temp = 1;
25624 fixP->fx_done = 1;
25625 }
25626 else
25627 temp = 3;
25628 goto arm_branch_common;
25629
25630 case BFD_RELOC_ARM_PCREL_JUMP:
25631 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25632 && fixP->fx_addsy
25633 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25634 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25635 && THUMB_IS_FUNC (fixP->fx_addsy))
25636 {
25637 /* This would map to a bl<cond>, b<cond>,
25638 b<always> to a Thumb function. We
25639 need to force a relocation for this particular
25640 case. */
25641 newval = md_chars_to_number (buf, INSN_SIZE);
25642 fixP->fx_done = 0;
25643 }
25644 /* Fall through. */
25645
25646 case BFD_RELOC_ARM_PLT32:
25647 #endif
25648 case BFD_RELOC_ARM_PCREL_BRANCH:
25649 temp = 3;
25650 goto arm_branch_common;
25651
25652 case BFD_RELOC_ARM_PCREL_BLX:
25653
25654 temp = 1;
25655 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25656 && fixP->fx_addsy
25657 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25658 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25659 && ARM_IS_FUNC (fixP->fx_addsy))
25660 {
25661 /* Flip the blx to a bl and warn. */
25662 const char *name = S_GET_NAME (fixP->fx_addsy);
25663 newval = 0xeb000000;
25664 as_warn_where (fixP->fx_file, fixP->fx_line,
25665 _("blx to '%s' an ARM ISA state function changed to bl"),
25666 name);
25667 md_number_to_chars (buf, newval, INSN_SIZE);
25668 temp = 3;
25669 fixP->fx_done = 1;
25670 }
25671
25672 #ifdef OBJ_ELF
25673 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
25674 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
25675 #endif
25676
25677 arm_branch_common:
25678 /* We are going to store value (shifted right by two) in the
25679 instruction, in a 24 bit, signed field. Bits 26 through 32 either
25680 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
25681 also be clear. */
25682 if (value & temp)
25683 as_bad_where (fixP->fx_file, fixP->fx_line,
25684 _("misaligned branch destination"));
25685 if ((value & (offsetT)0xfe000000) != (offsetT)0
25686 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
25687 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25688
25689 if (fixP->fx_done || !seg->use_rela_p)
25690 {
25691 newval = md_chars_to_number (buf, INSN_SIZE);
25692 newval |= (value >> 2) & 0x00ffffff;
25693 /* Set the H bit on BLX instructions. */
25694 if (temp == 1)
25695 {
25696 if (value & 2)
25697 newval |= 0x01000000;
25698 else
25699 newval &= ~0x01000000;
25700 }
25701 md_number_to_chars (buf, newval, INSN_SIZE);
25702 }
25703 break;
25704
25705 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
25706 /* CBZ can only branch forward. */
25707
25708 /* Attempts to use CBZ to branch to the next instruction
25709 (which, strictly speaking, are prohibited) will be turned into
25710 no-ops.
25711
25712 FIXME: It may be better to remove the instruction completely and
25713 perform relaxation. */
25714 if (value == -2)
25715 {
25716 newval = md_chars_to_number (buf, THUMB_SIZE);
25717 newval = 0xbf00; /* NOP encoding T1 */
25718 md_number_to_chars (buf, newval, THUMB_SIZE);
25719 }
25720 else
25721 {
25722 if (value & ~0x7e)
25723 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25724
25725 if (fixP->fx_done || !seg->use_rela_p)
25726 {
25727 newval = md_chars_to_number (buf, THUMB_SIZE);
25728 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
25729 md_number_to_chars (buf, newval, THUMB_SIZE);
25730 }
25731 }
25732 break;
25733
25734 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
25735 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
25736 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25737
25738 if (fixP->fx_done || !seg->use_rela_p)
25739 {
25740 newval = md_chars_to_number (buf, THUMB_SIZE);
25741 newval |= (value & 0x1ff) >> 1;
25742 md_number_to_chars (buf, newval, THUMB_SIZE);
25743 }
25744 break;
25745
25746 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
25747 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
25748 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25749
25750 if (fixP->fx_done || !seg->use_rela_p)
25751 {
25752 newval = md_chars_to_number (buf, THUMB_SIZE);
25753 newval |= (value & 0xfff) >> 1;
25754 md_number_to_chars (buf, newval, THUMB_SIZE);
25755 }
25756 break;
25757
25758 case BFD_RELOC_THUMB_PCREL_BRANCH20:
25759 if (fixP->fx_addsy
25760 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25761 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25762 && ARM_IS_FUNC (fixP->fx_addsy)
25763 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
25764 {
25765 /* Force a relocation for a branch 20 bits wide. */
25766 fixP->fx_done = 0;
25767 }
25768 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
25769 as_bad_where (fixP->fx_file, fixP->fx_line,
25770 _("conditional branch out of range"));
25771
25772 if (fixP->fx_done || !seg->use_rela_p)
25773 {
25774 offsetT newval2;
25775 addressT S, J1, J2, lo, hi;
25776
25777 S = (value & 0x00100000) >> 20;
25778 J2 = (value & 0x00080000) >> 19;
25779 J1 = (value & 0x00040000) >> 18;
25780 hi = (value & 0x0003f000) >> 12;
25781 lo = (value & 0x00000ffe) >> 1;
25782
25783 newval = md_chars_to_number (buf, THUMB_SIZE);
25784 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25785 newval |= (S << 10) | hi;
25786 newval2 |= (J1 << 13) | (J2 << 11) | lo;
25787 md_number_to_chars (buf, newval, THUMB_SIZE);
25788 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
25789 }
25790 break;
25791
25792 case BFD_RELOC_THUMB_PCREL_BLX:
25793 /* If there is a blx from a thumb state function to
25794 another thumb function flip this to a bl and warn
25795 about it. */
25796
25797 if (fixP->fx_addsy
25798 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25799 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25800 && THUMB_IS_FUNC (fixP->fx_addsy))
25801 {
25802 const char *name = S_GET_NAME (fixP->fx_addsy);
25803 as_warn_where (fixP->fx_file, fixP->fx_line,
25804 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
25805 name);
25806 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25807 newval = newval | 0x1000;
25808 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
25809 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
25810 fixP->fx_done = 1;
25811 }
25812
25813
25814 goto thumb_bl_common;
25815
25816 case BFD_RELOC_THUMB_PCREL_BRANCH23:
25817 /* A bl from Thumb state ISA to an internal ARM state function
25818 is converted to a blx. */
25819 if (fixP->fx_addsy
25820 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25821 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25822 && ARM_IS_FUNC (fixP->fx_addsy)
25823 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
25824 {
25825 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25826 newval = newval & ~0x1000;
25827 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
25828 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
25829 fixP->fx_done = 1;
25830 }
25831
25832 thumb_bl_common:
25833
25834 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
25835 /* For a BLX instruction, make sure that the relocation is rounded up
25836 to a word boundary. This follows the semantics of the instruction
25837 which specifies that bit 1 of the target address will come from bit
25838 1 of the base address. */
25839 value = (value + 3) & ~ 3;
25840
25841 #ifdef OBJ_ELF
25842 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
25843 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
25844 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
25845 #endif
25846
25847 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
25848 {
25849 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
25850 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25851 else if ((value & ~0x1ffffff)
25852 && ((value & ~0x1ffffff) != ~0x1ffffff))
25853 as_bad_where (fixP->fx_file, fixP->fx_line,
25854 _("Thumb2 branch out of range"));
25855 }
25856
25857 if (fixP->fx_done || !seg->use_rela_p)
25858 encode_thumb2_b_bl_offset (buf, value);
25859
25860 break;
25861
25862 case BFD_RELOC_THUMB_PCREL_BRANCH25:
25863 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
25864 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25865
25866 if (fixP->fx_done || !seg->use_rela_p)
25867 encode_thumb2_b_bl_offset (buf, value);
25868
25869 break;
25870
25871 case BFD_RELOC_8:
25872 if (fixP->fx_done || !seg->use_rela_p)
25873 *buf = value;
25874 break;
25875
25876 case BFD_RELOC_16:
25877 if (fixP->fx_done || !seg->use_rela_p)
25878 md_number_to_chars (buf, value, 2);
25879 break;
25880
25881 #ifdef OBJ_ELF
25882 case BFD_RELOC_ARM_TLS_CALL:
25883 case BFD_RELOC_ARM_THM_TLS_CALL:
25884 case BFD_RELOC_ARM_TLS_DESCSEQ:
25885 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
25886 case BFD_RELOC_ARM_TLS_GOTDESC:
25887 case BFD_RELOC_ARM_TLS_GD32:
25888 case BFD_RELOC_ARM_TLS_LE32:
25889 case BFD_RELOC_ARM_TLS_IE32:
25890 case BFD_RELOC_ARM_TLS_LDM32:
25891 case BFD_RELOC_ARM_TLS_LDO32:
25892 S_SET_THREAD_LOCAL (fixP->fx_addsy);
25893 break;
25894
25895 /* Same handling as above, but with the arm_fdpic guard. */
25896 case BFD_RELOC_ARM_TLS_GD32_FDPIC:
25897 case BFD_RELOC_ARM_TLS_IE32_FDPIC:
25898 case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
25899 if (arm_fdpic)
25900 {
25901 S_SET_THREAD_LOCAL (fixP->fx_addsy);
25902 }
25903 else
25904 {
25905 as_bad_where (fixP->fx_file, fixP->fx_line,
25906 _("Relocation supported only in FDPIC mode"));
25907 }
25908 break;
25909
25910 case BFD_RELOC_ARM_GOT32:
25911 case BFD_RELOC_ARM_GOTOFF:
25912 break;
25913
25914 case BFD_RELOC_ARM_GOT_PREL:
25915 if (fixP->fx_done || !seg->use_rela_p)
25916 md_number_to_chars (buf, value, 4);
25917 break;
25918
25919 case BFD_RELOC_ARM_TARGET2:
25920 /* TARGET2 is not partial-inplace, so we need to write the
25921 addend here for REL targets, because it won't be written out
25922 during reloc processing later. */
25923 if (fixP->fx_done || !seg->use_rela_p)
25924 md_number_to_chars (buf, fixP->fx_offset, 4);
25925 break;
25926
25927 /* Relocations for FDPIC. */
25928 case BFD_RELOC_ARM_GOTFUNCDESC:
25929 case BFD_RELOC_ARM_GOTOFFFUNCDESC:
25930 case BFD_RELOC_ARM_FUNCDESC:
25931 if (arm_fdpic)
25932 {
25933 if (fixP->fx_done || !seg->use_rela_p)
25934 md_number_to_chars (buf, 0, 4);
25935 }
25936 else
25937 {
25938 as_bad_where (fixP->fx_file, fixP->fx_line,
25939 _("Relocation supported only in FDPIC mode"));
25940 }
25941 break;
25942 #endif
25943
25944 case BFD_RELOC_RVA:
25945 case BFD_RELOC_32:
25946 case BFD_RELOC_ARM_TARGET1:
25947 case BFD_RELOC_ARM_ROSEGREL32:
25948 case BFD_RELOC_ARM_SBREL32:
25949 case BFD_RELOC_32_PCREL:
25950 #ifdef TE_PE
25951 case BFD_RELOC_32_SECREL:
25952 #endif
25953 if (fixP->fx_done || !seg->use_rela_p)
25954 #ifdef TE_WINCE
25955 /* For WinCE we only do this for pcrel fixups. */
25956 if (fixP->fx_done || fixP->fx_pcrel)
25957 #endif
25958 md_number_to_chars (buf, value, 4);
25959 break;
25960
25961 #ifdef OBJ_ELF
25962 case BFD_RELOC_ARM_PREL31:
25963 if (fixP->fx_done || !seg->use_rela_p)
25964 {
25965 newval = md_chars_to_number (buf, 4) & 0x80000000;
25966 if ((value ^ (value >> 1)) & 0x40000000)
25967 {
25968 as_bad_where (fixP->fx_file, fixP->fx_line,
25969 _("rel31 relocation overflow"));
25970 }
25971 newval |= value & 0x7fffffff;
25972 md_number_to_chars (buf, newval, 4);
25973 }
25974 break;
25975 #endif
25976
25977 case BFD_RELOC_ARM_CP_OFF_IMM:
25978 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
25979 case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM:
25980 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
25981 newval = md_chars_to_number (buf, INSN_SIZE);
25982 else
25983 newval = get_thumb32_insn (buf);
25984 if ((newval & 0x0f200f00) == 0x0d000900)
25985 {
25986 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
25987 has permitted values that are multiples of 2, in the range 0
25988 to 510. */
25989 if (value < -510 || value > 510 || (value & 1))
25990 as_bad_where (fixP->fx_file, fixP->fx_line,
25991 _("co-processor offset out of range"));
25992 }
25993 else if ((newval & 0xfe001f80) == 0xec000f80)
25994 {
25995 if (value < -511 || value > 512 || (value & 3))
25996 as_bad_where (fixP->fx_file, fixP->fx_line,
25997 _("co-processor offset out of range"));
25998 }
25999 else if (value < -1023 || value > 1023 || (value & 3))
26000 as_bad_where (fixP->fx_file, fixP->fx_line,
26001 _("co-processor offset out of range"));
26002 cp_off_common:
26003 sign = value > 0;
26004 if (value < 0)
26005 value = -value;
26006 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
26007 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
26008 newval = md_chars_to_number (buf, INSN_SIZE);
26009 else
26010 newval = get_thumb32_insn (buf);
26011 if (value == 0)
26012 {
26013 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
26014 newval &= 0xffffff80;
26015 else
26016 newval &= 0xffffff00;
26017 }
26018 else
26019 {
26020 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
26021 newval &= 0xff7fff80;
26022 else
26023 newval &= 0xff7fff00;
26024 if ((newval & 0x0f200f00) == 0x0d000900)
26025 {
26026 /* This is a fp16 vstr/vldr.
26027
26028 It requires the immediate offset in the instruction is shifted
26029 left by 1 to be a half-word offset.
26030
26031 Here, left shift by 1 first, and later right shift by 2
26032 should get the right offset. */
26033 value <<= 1;
26034 }
26035 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
26036 }
26037 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
26038 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
26039 md_number_to_chars (buf, newval, INSN_SIZE);
26040 else
26041 put_thumb32_insn (buf, newval);
26042 break;
26043
26044 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
26045 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
26046 if (value < -255 || value > 255)
26047 as_bad_where (fixP->fx_file, fixP->fx_line,
26048 _("co-processor offset out of range"));
26049 value *= 4;
26050 goto cp_off_common;
26051
26052 case BFD_RELOC_ARM_THUMB_OFFSET:
26053 newval = md_chars_to_number (buf, THUMB_SIZE);
26054 /* Exactly what ranges, and where the offset is inserted depends
26055 on the type of instruction, we can establish this from the
26056 top 4 bits. */
26057 switch (newval >> 12)
26058 {
26059 case 4: /* PC load. */
26060 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
26061 forced to zero for these loads; md_pcrel_from has already
26062 compensated for this. */
26063 if (value & 3)
26064 as_bad_where (fixP->fx_file, fixP->fx_line,
26065 _("invalid offset, target not word aligned (0x%08lX)"),
26066 (((unsigned long) fixP->fx_frag->fr_address
26067 + (unsigned long) fixP->fx_where) & ~3)
26068 + (unsigned long) value);
26069
26070 if (value & ~0x3fc)
26071 as_bad_where (fixP->fx_file, fixP->fx_line,
26072 _("invalid offset, value too big (0x%08lX)"),
26073 (long) value);
26074
26075 newval |= value >> 2;
26076 break;
26077
26078 case 9: /* SP load/store. */
26079 if (value & ~0x3fc)
26080 as_bad_where (fixP->fx_file, fixP->fx_line,
26081 _("invalid offset, value too big (0x%08lX)"),
26082 (long) value);
26083 newval |= value >> 2;
26084 break;
26085
26086 case 6: /* Word load/store. */
26087 if (value & ~0x7c)
26088 as_bad_where (fixP->fx_file, fixP->fx_line,
26089 _("invalid offset, value too big (0x%08lX)"),
26090 (long) value);
26091 newval |= value << 4; /* 6 - 2. */
26092 break;
26093
26094 case 7: /* Byte load/store. */
26095 if (value & ~0x1f)
26096 as_bad_where (fixP->fx_file, fixP->fx_line,
26097 _("invalid offset, value too big (0x%08lX)"),
26098 (long) value);
26099 newval |= value << 6;
26100 break;
26101
26102 case 8: /* Halfword load/store. */
26103 if (value & ~0x3e)
26104 as_bad_where (fixP->fx_file, fixP->fx_line,
26105 _("invalid offset, value too big (0x%08lX)"),
26106 (long) value);
26107 newval |= value << 5; /* 6 - 1. */
26108 break;
26109
26110 default:
26111 as_bad_where (fixP->fx_file, fixP->fx_line,
26112 "Unable to process relocation for thumb opcode: %lx",
26113 (unsigned long) newval);
26114 break;
26115 }
26116 md_number_to_chars (buf, newval, THUMB_SIZE);
26117 break;
26118
26119 case BFD_RELOC_ARM_THUMB_ADD:
26120 /* This is a complicated relocation, since we use it for all of
26121 the following immediate relocations:
26122
26123 3bit ADD/SUB
26124 8bit ADD/SUB
26125 9bit ADD/SUB SP word-aligned
26126 10bit ADD PC/SP word-aligned
26127
26128 The type of instruction being processed is encoded in the
26129 instruction field:
26130
26131 0x8000 SUB
26132 0x00F0 Rd
26133 0x000F Rs
26134 */
26135 newval = md_chars_to_number (buf, THUMB_SIZE);
26136 {
26137 int rd = (newval >> 4) & 0xf;
26138 int rs = newval & 0xf;
26139 int subtract = !!(newval & 0x8000);
26140
26141 /* Check for HI regs, only very restricted cases allowed:
26142 Adjusting SP, and using PC or SP to get an address. */
26143 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
26144 || (rs > 7 && rs != REG_SP && rs != REG_PC))
26145 as_bad_where (fixP->fx_file, fixP->fx_line,
26146 _("invalid Hi register with immediate"));
26147
26148 /* If value is negative, choose the opposite instruction. */
26149 if (value < 0)
26150 {
26151 value = -value;
26152 subtract = !subtract;
26153 if (value < 0)
26154 as_bad_where (fixP->fx_file, fixP->fx_line,
26155 _("immediate value out of range"));
26156 }
26157
26158 if (rd == REG_SP)
26159 {
26160 if (value & ~0x1fc)
26161 as_bad_where (fixP->fx_file, fixP->fx_line,
26162 _("invalid immediate for stack address calculation"));
26163 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
26164 newval |= value >> 2;
26165 }
26166 else if (rs == REG_PC || rs == REG_SP)
26167 {
26168 /* PR gas/18541. If the addition is for a defined symbol
26169 within range of an ADR instruction then accept it. */
26170 if (subtract
26171 && value == 4
26172 && fixP->fx_addsy != NULL)
26173 {
26174 subtract = 0;
26175
26176 if (! S_IS_DEFINED (fixP->fx_addsy)
26177 || S_GET_SEGMENT (fixP->fx_addsy) != seg
26178 || S_IS_WEAK (fixP->fx_addsy))
26179 {
26180 as_bad_where (fixP->fx_file, fixP->fx_line,
26181 _("address calculation needs a strongly defined nearby symbol"));
26182 }
26183 else
26184 {
26185 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
26186
26187 /* Round up to the next 4-byte boundary. */
26188 if (v & 3)
26189 v = (v + 3) & ~ 3;
26190 else
26191 v += 4;
26192 v = S_GET_VALUE (fixP->fx_addsy) - v;
26193
26194 if (v & ~0x3fc)
26195 {
26196 as_bad_where (fixP->fx_file, fixP->fx_line,
26197 _("symbol too far away"));
26198 }
26199 else
26200 {
26201 fixP->fx_done = 1;
26202 value = v;
26203 }
26204 }
26205 }
26206
26207 if (subtract || value & ~0x3fc)
26208 as_bad_where (fixP->fx_file, fixP->fx_line,
26209 _("invalid immediate for address calculation (value = 0x%08lX)"),
26210 (unsigned long) (subtract ? - value : value));
26211 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
26212 newval |= rd << 8;
26213 newval |= value >> 2;
26214 }
26215 else if (rs == rd)
26216 {
26217 if (value & ~0xff)
26218 as_bad_where (fixP->fx_file, fixP->fx_line,
26219 _("immediate value out of range"));
26220 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
26221 newval |= (rd << 8) | value;
26222 }
26223 else
26224 {
26225 if (value & ~0x7)
26226 as_bad_where (fixP->fx_file, fixP->fx_line,
26227 _("immediate value out of range"));
26228 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
26229 newval |= rd | (rs << 3) | (value << 6);
26230 }
26231 }
26232 md_number_to_chars (buf, newval, THUMB_SIZE);
26233 break;
26234
26235 case BFD_RELOC_ARM_THUMB_IMM:
26236 newval = md_chars_to_number (buf, THUMB_SIZE);
26237 if (value < 0 || value > 255)
26238 as_bad_where (fixP->fx_file, fixP->fx_line,
26239 _("invalid immediate: %ld is out of range"),
26240 (long) value);
26241 newval |= value;
26242 md_number_to_chars (buf, newval, THUMB_SIZE);
26243 break;
26244
26245 case BFD_RELOC_ARM_THUMB_SHIFT:
26246 /* 5bit shift value (0..32). LSL cannot take 32. */
26247 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
26248 temp = newval & 0xf800;
26249 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
26250 as_bad_where (fixP->fx_file, fixP->fx_line,
26251 _("invalid shift value: %ld"), (long) value);
26252 /* Shifts of zero must be encoded as LSL. */
26253 if (value == 0)
26254 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
26255 /* Shifts of 32 are encoded as zero. */
26256 else if (value == 32)
26257 value = 0;
26258 newval |= value << 6;
26259 md_number_to_chars (buf, newval, THUMB_SIZE);
26260 break;
26261
26262 case BFD_RELOC_VTABLE_INHERIT:
26263 case BFD_RELOC_VTABLE_ENTRY:
26264 fixP->fx_done = 0;
26265 return;
26266
26267 case BFD_RELOC_ARM_MOVW:
26268 case BFD_RELOC_ARM_MOVT:
26269 case BFD_RELOC_ARM_THUMB_MOVW:
26270 case BFD_RELOC_ARM_THUMB_MOVT:
26271 if (fixP->fx_done || !seg->use_rela_p)
26272 {
26273 /* REL format relocations are limited to a 16-bit addend. */
26274 if (!fixP->fx_done)
26275 {
26276 if (value < -0x8000 || value > 0x7fff)
26277 as_bad_where (fixP->fx_file, fixP->fx_line,
26278 _("offset out of range"));
26279 }
26280 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
26281 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
26282 {
26283 value >>= 16;
26284 }
26285
26286 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
26287 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
26288 {
26289 newval = get_thumb32_insn (buf);
26290 newval &= 0xfbf08f00;
26291 newval |= (value & 0xf000) << 4;
26292 newval |= (value & 0x0800) << 15;
26293 newval |= (value & 0x0700) << 4;
26294 newval |= (value & 0x00ff);
26295 put_thumb32_insn (buf, newval);
26296 }
26297 else
26298 {
26299 newval = md_chars_to_number (buf, 4);
26300 newval &= 0xfff0f000;
26301 newval |= value & 0x0fff;
26302 newval |= (value & 0xf000) << 4;
26303 md_number_to_chars (buf, newval, 4);
26304 }
26305 }
26306 return;
26307
26308 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
26309 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
26310 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
26311 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
26312 gas_assert (!fixP->fx_done);
26313 {
26314 bfd_vma insn;
26315 bfd_boolean is_mov;
26316 bfd_vma encoded_addend = value;
26317
26318 /* Check that addend can be encoded in instruction. */
26319 if (!seg->use_rela_p && (value < 0 || value > 255))
26320 as_bad_where (fixP->fx_file, fixP->fx_line,
26321 _("the offset 0x%08lX is not representable"),
26322 (unsigned long) encoded_addend);
26323
26324 /* Extract the instruction. */
26325 insn = md_chars_to_number (buf, THUMB_SIZE);
26326 is_mov = (insn & 0xf800) == 0x2000;
26327
26328 /* Encode insn. */
26329 if (is_mov)
26330 {
26331 if (!seg->use_rela_p)
26332 insn |= encoded_addend;
26333 }
26334 else
26335 {
26336 int rd, rs;
26337
26338 /* Extract the instruction. */
26339 /* Encoding is the following
26340 0x8000 SUB
26341 0x00F0 Rd
26342 0x000F Rs
26343 */
26344 /* The following conditions must be true :
26345 - ADD
26346 - Rd == Rs
26347 - Rd <= 7
26348 */
26349 rd = (insn >> 4) & 0xf;
26350 rs = insn & 0xf;
26351 if ((insn & 0x8000) || (rd != rs) || rd > 7)
26352 as_bad_where (fixP->fx_file, fixP->fx_line,
26353 _("Unable to process relocation for thumb opcode: %lx"),
26354 (unsigned long) insn);
26355
26356 /* Encode as ADD immediate8 thumb 1 code. */
26357 insn = 0x3000 | (rd << 8);
26358
26359 /* Place the encoded addend into the first 8 bits of the
26360 instruction. */
26361 if (!seg->use_rela_p)
26362 insn |= encoded_addend;
26363 }
26364
26365 /* Update the instruction. */
26366 md_number_to_chars (buf, insn, THUMB_SIZE);
26367 }
26368 break;
26369
26370 case BFD_RELOC_ARM_ALU_PC_G0_NC:
26371 case BFD_RELOC_ARM_ALU_PC_G0:
26372 case BFD_RELOC_ARM_ALU_PC_G1_NC:
26373 case BFD_RELOC_ARM_ALU_PC_G1:
26374 case BFD_RELOC_ARM_ALU_PC_G2:
26375 case BFD_RELOC_ARM_ALU_SB_G0_NC:
26376 case BFD_RELOC_ARM_ALU_SB_G0:
26377 case BFD_RELOC_ARM_ALU_SB_G1_NC:
26378 case BFD_RELOC_ARM_ALU_SB_G1:
26379 case BFD_RELOC_ARM_ALU_SB_G2:
26380 gas_assert (!fixP->fx_done);
26381 if (!seg->use_rela_p)
26382 {
26383 bfd_vma insn;
26384 bfd_vma encoded_addend;
26385 bfd_vma addend_abs = llabs (value);
26386
26387 /* Check that the absolute value of the addend can be
26388 expressed as an 8-bit constant plus a rotation. */
26389 encoded_addend = encode_arm_immediate (addend_abs);
26390 if (encoded_addend == (unsigned int) FAIL)
26391 as_bad_where (fixP->fx_file, fixP->fx_line,
26392 _("the offset 0x%08lX is not representable"),
26393 (unsigned long) addend_abs);
26394
26395 /* Extract the instruction. */
26396 insn = md_chars_to_number (buf, INSN_SIZE);
26397
26398 /* If the addend is positive, use an ADD instruction.
26399 Otherwise use a SUB. Take care not to destroy the S bit. */
26400 insn &= 0xff1fffff;
26401 if (value < 0)
26402 insn |= 1 << 22;
26403 else
26404 insn |= 1 << 23;
26405
26406 /* Place the encoded addend into the first 12 bits of the
26407 instruction. */
26408 insn &= 0xfffff000;
26409 insn |= encoded_addend;
26410
26411 /* Update the instruction. */
26412 md_number_to_chars (buf, insn, INSN_SIZE);
26413 }
26414 break;
26415
26416 case BFD_RELOC_ARM_LDR_PC_G0:
26417 case BFD_RELOC_ARM_LDR_PC_G1:
26418 case BFD_RELOC_ARM_LDR_PC_G2:
26419 case BFD_RELOC_ARM_LDR_SB_G0:
26420 case BFD_RELOC_ARM_LDR_SB_G1:
26421 case BFD_RELOC_ARM_LDR_SB_G2:
26422 gas_assert (!fixP->fx_done);
26423 if (!seg->use_rela_p)
26424 {
26425 bfd_vma insn;
26426 bfd_vma addend_abs = llabs (value);
26427
26428 /* Check that the absolute value of the addend can be
26429 encoded in 12 bits. */
26430 if (addend_abs >= 0x1000)
26431 as_bad_where (fixP->fx_file, fixP->fx_line,
26432 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
26433 (unsigned long) addend_abs);
26434
26435 /* Extract the instruction. */
26436 insn = md_chars_to_number (buf, INSN_SIZE);
26437
26438 /* If the addend is negative, clear bit 23 of the instruction.
26439 Otherwise set it. */
26440 if (value < 0)
26441 insn &= ~(1 << 23);
26442 else
26443 insn |= 1 << 23;
26444
26445 /* Place the absolute value of the addend into the first 12 bits
26446 of the instruction. */
26447 insn &= 0xfffff000;
26448 insn |= addend_abs;
26449
26450 /* Update the instruction. */
26451 md_number_to_chars (buf, insn, INSN_SIZE);
26452 }
26453 break;
26454
26455 case BFD_RELOC_ARM_LDRS_PC_G0:
26456 case BFD_RELOC_ARM_LDRS_PC_G1:
26457 case BFD_RELOC_ARM_LDRS_PC_G2:
26458 case BFD_RELOC_ARM_LDRS_SB_G0:
26459 case BFD_RELOC_ARM_LDRS_SB_G1:
26460 case BFD_RELOC_ARM_LDRS_SB_G2:
26461 gas_assert (!fixP->fx_done);
26462 if (!seg->use_rela_p)
26463 {
26464 bfd_vma insn;
26465 bfd_vma addend_abs = llabs (value);
26466
26467 /* Check that the absolute value of the addend can be
26468 encoded in 8 bits. */
26469 if (addend_abs >= 0x100)
26470 as_bad_where (fixP->fx_file, fixP->fx_line,
26471 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
26472 (unsigned long) addend_abs);
26473
26474 /* Extract the instruction. */
26475 insn = md_chars_to_number (buf, INSN_SIZE);
26476
26477 /* If the addend is negative, clear bit 23 of the instruction.
26478 Otherwise set it. */
26479 if (value < 0)
26480 insn &= ~(1 << 23);
26481 else
26482 insn |= 1 << 23;
26483
26484 /* Place the first four bits of the absolute value of the addend
26485 into the first 4 bits of the instruction, and the remaining
26486 four into bits 8 .. 11. */
26487 insn &= 0xfffff0f0;
26488 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
26489
26490 /* Update the instruction. */
26491 md_number_to_chars (buf, insn, INSN_SIZE);
26492 }
26493 break;
26494
26495 case BFD_RELOC_ARM_LDC_PC_G0:
26496 case BFD_RELOC_ARM_LDC_PC_G1:
26497 case BFD_RELOC_ARM_LDC_PC_G2:
26498 case BFD_RELOC_ARM_LDC_SB_G0:
26499 case BFD_RELOC_ARM_LDC_SB_G1:
26500 case BFD_RELOC_ARM_LDC_SB_G2:
26501 gas_assert (!fixP->fx_done);
26502 if (!seg->use_rela_p)
26503 {
26504 bfd_vma insn;
26505 bfd_vma addend_abs = llabs (value);
26506
26507 /* Check that the absolute value of the addend is a multiple of
26508 four and, when divided by four, fits in 8 bits. */
26509 if (addend_abs & 0x3)
26510 as_bad_where (fixP->fx_file, fixP->fx_line,
26511 _("bad offset 0x%08lX (must be word-aligned)"),
26512 (unsigned long) addend_abs);
26513
26514 if ((addend_abs >> 2) > 0xff)
26515 as_bad_where (fixP->fx_file, fixP->fx_line,
26516 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
26517 (unsigned long) addend_abs);
26518
26519 /* Extract the instruction. */
26520 insn = md_chars_to_number (buf, INSN_SIZE);
26521
26522 /* If the addend is negative, clear bit 23 of the instruction.
26523 Otherwise set it. */
26524 if (value < 0)
26525 insn &= ~(1 << 23);
26526 else
26527 insn |= 1 << 23;
26528
26529 /* Place the addend (divided by four) into the first eight
26530 bits of the instruction. */
26531 insn &= 0xfffffff0;
26532 insn |= addend_abs >> 2;
26533
26534 /* Update the instruction. */
26535 md_number_to_chars (buf, insn, INSN_SIZE);
26536 }
26537 break;
26538
26539 case BFD_RELOC_THUMB_PCREL_BRANCH5:
26540 if (fixP->fx_addsy
26541 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26542 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26543 && ARM_IS_FUNC (fixP->fx_addsy)
26544 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26545 {
26546 /* Force a relocation for a branch 5 bits wide. */
26547 fixP->fx_done = 0;
26548 }
26549 if (v8_1_branch_value_check (value, 5, FALSE) == FAIL)
26550 as_bad_where (fixP->fx_file, fixP->fx_line,
26551 BAD_BRANCH_OFF);
26552
26553 if (fixP->fx_done || !seg->use_rela_p)
26554 {
26555 addressT boff = value >> 1;
26556
26557 newval = md_chars_to_number (buf, THUMB_SIZE);
26558 newval |= (boff << 7);
26559 md_number_to_chars (buf, newval, THUMB_SIZE);
26560 }
26561 break;
26562
26563 case BFD_RELOC_THUMB_PCREL_BFCSEL:
26564 if (fixP->fx_addsy
26565 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26566 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26567 && ARM_IS_FUNC (fixP->fx_addsy)
26568 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26569 {
26570 fixP->fx_done = 0;
26571 }
26572 if ((value & ~0x7f) && ((value & ~0x3f) != ~0x3f))
26573 as_bad_where (fixP->fx_file, fixP->fx_line,
26574 _("branch out of range"));
26575
26576 if (fixP->fx_done || !seg->use_rela_p)
26577 {
26578 newval = md_chars_to_number (buf, THUMB_SIZE);
26579
26580 addressT boff = ((newval & 0x0780) >> 7) << 1;
26581 addressT diff = value - boff;
26582
26583 if (diff == 4)
26584 {
26585 newval |= 1 << 1; /* T bit. */
26586 }
26587 else if (diff != 2)
26588 {
26589 as_bad_where (fixP->fx_file, fixP->fx_line,
26590 _("out of range label-relative fixup value"));
26591 }
26592 md_number_to_chars (buf, newval, THUMB_SIZE);
26593 }
26594 break;
26595
26596 case BFD_RELOC_ARM_THUMB_BF17:
26597 if (fixP->fx_addsy
26598 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26599 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26600 && ARM_IS_FUNC (fixP->fx_addsy)
26601 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26602 {
26603 /* Force a relocation for a branch 17 bits wide. */
26604 fixP->fx_done = 0;
26605 }
26606
26607 if (v8_1_branch_value_check (value, 17, TRUE) == FAIL)
26608 as_bad_where (fixP->fx_file, fixP->fx_line,
26609 BAD_BRANCH_OFF);
26610
26611 if (fixP->fx_done || !seg->use_rela_p)
26612 {
26613 offsetT newval2;
26614 addressT immA, immB, immC;
26615
26616 immA = (value & 0x0001f000) >> 12;
26617 immB = (value & 0x00000ffc) >> 2;
26618 immC = (value & 0x00000002) >> 1;
26619
26620 newval = md_chars_to_number (buf, THUMB_SIZE);
26621 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
26622 newval |= immA;
26623 newval2 |= (immC << 11) | (immB << 1);
26624 md_number_to_chars (buf, newval, THUMB_SIZE);
26625 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
26626 }
26627 break;
26628
26629 case BFD_RELOC_ARM_THUMB_BF19:
26630 if (fixP->fx_addsy
26631 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26632 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26633 && ARM_IS_FUNC (fixP->fx_addsy)
26634 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26635 {
26636 /* Force a relocation for a branch 19 bits wide. */
26637 fixP->fx_done = 0;
26638 }
26639
26640 if (v8_1_branch_value_check (value, 19, TRUE) == FAIL)
26641 as_bad_where (fixP->fx_file, fixP->fx_line,
26642 BAD_BRANCH_OFF);
26643
26644 if (fixP->fx_done || !seg->use_rela_p)
26645 {
26646 offsetT newval2;
26647 addressT immA, immB, immC;
26648
26649 immA = (value & 0x0007f000) >> 12;
26650 immB = (value & 0x00000ffc) >> 2;
26651 immC = (value & 0x00000002) >> 1;
26652
26653 newval = md_chars_to_number (buf, THUMB_SIZE);
26654 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
26655 newval |= immA;
26656 newval2 |= (immC << 11) | (immB << 1);
26657 md_number_to_chars (buf, newval, THUMB_SIZE);
26658 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
26659 }
26660 break;
26661
26662 case BFD_RELOC_ARM_THUMB_BF13:
26663 if (fixP->fx_addsy
26664 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26665 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26666 && ARM_IS_FUNC (fixP->fx_addsy)
26667 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26668 {
26669 /* Force a relocation for a branch 13 bits wide. */
26670 fixP->fx_done = 0;
26671 }
26672
26673 if (v8_1_branch_value_check (value, 13, TRUE) == FAIL)
26674 as_bad_where (fixP->fx_file, fixP->fx_line,
26675 BAD_BRANCH_OFF);
26676
26677 if (fixP->fx_done || !seg->use_rela_p)
26678 {
26679 offsetT newval2;
26680 addressT immA, immB, immC;
26681
26682 immA = (value & 0x00001000) >> 12;
26683 immB = (value & 0x00000ffc) >> 2;
26684 immC = (value & 0x00000002) >> 1;
26685
26686 newval = md_chars_to_number (buf, THUMB_SIZE);
26687 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
26688 newval |= immA;
26689 newval2 |= (immC << 11) | (immB << 1);
26690 md_number_to_chars (buf, newval, THUMB_SIZE);
26691 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
26692 }
26693 break;
26694
26695 case BFD_RELOC_ARM_THUMB_LOOP12:
26696 if (fixP->fx_addsy
26697 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26698 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26699 && ARM_IS_FUNC (fixP->fx_addsy)
26700 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26701 {
26702 /* Force a relocation for a branch 12 bits wide. */
26703 fixP->fx_done = 0;
26704 }
26705
26706 bfd_vma insn = get_thumb32_insn (buf);
26707 /* le lr, <label> or le <label> */
26708 if (((insn & 0xffffffff) == 0xf00fc001)
26709 || ((insn & 0xffffffff) == 0xf02fc001))
26710 value = -value;
26711
26712 if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
26713 as_bad_where (fixP->fx_file, fixP->fx_line,
26714 BAD_BRANCH_OFF);
26715 if (fixP->fx_done || !seg->use_rela_p)
26716 {
26717 addressT imml, immh;
26718
26719 immh = (value & 0x00000ffc) >> 2;
26720 imml = (value & 0x00000002) >> 1;
26721
26722 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
26723 newval |= (imml << 11) | (immh << 1);
26724 md_number_to_chars (buf + THUMB_SIZE, newval, THUMB_SIZE);
26725 }
26726 break;
26727
26728 case BFD_RELOC_ARM_V4BX:
26729 /* This will need to go in the object file. */
26730 fixP->fx_done = 0;
26731 break;
26732
26733 case BFD_RELOC_UNUSED:
26734 default:
26735 as_bad_where (fixP->fx_file, fixP->fx_line,
26736 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
26737 }
26738 }
26739
26740 /* Translate internal representation of relocation info to BFD target
26741 format. */
26742
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  /* Allocate the BFD relocation and point it at the fixup's symbol and
     the offset of the fixup within its frag.  */
  reloc = XNEW (arelent);

  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  /* For PC-relative fixups the addend depends on the relocation format:
     RELA sections carry the PC bias in the addend, REL sections encode
     the place address instead.  */
  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fixup type onto the BFD relocation code to emit.
     The leading cases fall through so that a non-PC-relative fixup of
     each kind drops into the shared "use fx_r_type as-is" handling.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* EABI v4 and later unifies BL/BLX relocations into BRANCH23
	 and leaves the BL/BLX choice to the linker.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
    case BFD_RELOC_ARM_GOTFUNCDESC:
    case BFD_RELOC_ARM_GOTOFFFUNCDESC:
    case BFD_RELOC_ARM_FUNCDESC:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_GD32_FDPIC:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_IE32_FDPIC:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      /* These v8.1-M branch fixups have no object-file representation;
	 they must have been resolved during md_apply_fix.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("%s used for a symbol not defined in the same file"),
		    bfd_get_reloc_code_name (fixp->fx_r_type));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Any remaining fixup type is internal-only: report it by name
	   where we know one, then fail.  */
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit relocation against _GLOBAL_OFFSET_TABLE_ becomes GOTPC,
     with the addend rewritten to the place address.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
27026
27027 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
27028
27029 void
27030 cons_fix_new_arm (fragS * frag,
27031 int where,
27032 int size,
27033 expressionS * exp,
27034 bfd_reloc_code_real_type reloc)
27035 {
27036 int pcrel = 0;
27037
27038 /* Pick a reloc.
27039 FIXME: @@ Should look at CPU word size. */
27040 switch (size)
27041 {
27042 case 1:
27043 reloc = BFD_RELOC_8;
27044 break;
27045 case 2:
27046 reloc = BFD_RELOC_16;
27047 break;
27048 case 4:
27049 default:
27050 reloc = BFD_RELOC_32;
27051 break;
27052 case 8:
27053 reloc = BFD_RELOC_64;
27054 break;
27055 }
27056
27057 #ifdef TE_PE
27058 if (exp->X_op == O_secrel)
27059 {
27060 exp->X_op = O_symbol;
27061 reloc = BFD_RELOC_32_SECREL;
27062 }
27063 #endif
27064
27065 fix_new_exp (frag, where, size, exp, pcrel, reloc);
27066 }
27067
27068 #if defined (OBJ_COFF)
27069 void
27070 arm_validate_fix (fixS * fixP)
27071 {
27072 /* If the destination of the branch is a defined symbol which does not have
27073 the THUMB_FUNC attribute, then we must be calling a function which has
27074 the (interfacearm) attribute. We look for the Thumb entry point to that
27075 function and change the branch to refer to that function instead. */
27076 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
27077 && fixP->fx_addsy != NULL
27078 && S_IS_DEFINED (fixP->fx_addsy)
27079 && ! THUMB_IS_FUNC (fixP->fx_addsy))
27080 {
27081 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
27082 }
27083 }
27084 #endif
27085
27086
27087 int
27088 arm_force_relocation (struct fix * fixp)
27089 {
27090 #if defined (OBJ_COFF) && defined (TE_PE)
27091 if (fixp->fx_r_type == BFD_RELOC_RVA)
27092 return 1;
27093 #endif
27094
27095 /* In case we have a call or a branch to a function in ARM ISA mode from
27096 a thumb function or vice-versa force the relocation. These relocations
27097 are cleared off for some cores that might have blx and simple transformations
27098 are possible. */
27099
27100 #ifdef OBJ_ELF
27101 switch (fixp->fx_r_type)
27102 {
27103 case BFD_RELOC_ARM_PCREL_JUMP:
27104 case BFD_RELOC_ARM_PCREL_CALL:
27105 case BFD_RELOC_THUMB_PCREL_BLX:
27106 if (THUMB_IS_FUNC (fixp->fx_addsy))
27107 return 1;
27108 break;
27109
27110 case BFD_RELOC_ARM_PCREL_BLX:
27111 case BFD_RELOC_THUMB_PCREL_BRANCH25:
27112 case BFD_RELOC_THUMB_PCREL_BRANCH20:
27113 case BFD_RELOC_THUMB_PCREL_BRANCH23:
27114 if (ARM_IS_FUNC (fixp->fx_addsy))
27115 return 1;
27116 break;
27117
27118 default:
27119 break;
27120 }
27121 #endif
27122
27123 /* Resolve these relocations even if the symbol is extern or weak.
27124 Technically this is probably wrong due to symbol preemption.
27125 In practice these relocations do not have enough range to be useful
27126 at dynamic link time, and some code (e.g. in the Linux kernel)
27127 expects these references to be resolved. */
27128 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
27129 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
27130 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
27131 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
27132 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
27133 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
27134 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
27135 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
27136 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
27137 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
27138 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
27139 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
27140 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
27141 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
27142 return 0;
27143
27144 /* Always leave these relocations for the linker. */
27145 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
27146 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
27147 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
27148 return 1;
27149
27150 /* Always generate relocations against function symbols. */
27151 if (fixp->fx_r_type == BFD_RELOC_32
27152 && fixp->fx_addsy
27153 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
27154 return 1;
27155
27156 return generic_force_reloc (fixp);
27157 }
27158
27159 #if defined (OBJ_ELF) || defined (OBJ_COFF)
27160 /* Relocations against function names must be left unadjusted,
27161 so that the linker can use this information to generate interworking
27162 stubs. The MIPS version of this function
27163 also prevents relocations that are mips-16 specific, but I do not
27164 know why it does this.
27165
27166 FIXME:
27167 There is one other problem that ought to be addressed here, but
27168 which currently is not: Taking the address of a label (rather
27169 than a function) and then later jumping to that address. Such
27170 addresses also ought to have their bottom bit set (assuming that
27171 they reside in Thumb code), but at the moment they will not. */
27172
27173 bfd_boolean
27174 arm_fix_adjustable (fixS * fixP)
27175 {
27176 if (fixP->fx_addsy == NULL)
27177 return 1;
27178
27179 /* Preserve relocations against symbols with function type. */
27180 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
27181 return FALSE;
27182
27183 if (THUMB_IS_FUNC (fixP->fx_addsy)
27184 && fixP->fx_subsy == NULL)
27185 return FALSE;
27186
27187 /* We need the symbol name for the VTABLE entries. */
27188 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
27189 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
27190 return FALSE;
27191
27192 /* Don't allow symbols to be discarded on GOT related relocs. */
27193 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
27194 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
27195 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
27196 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
27197 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32_FDPIC
27198 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
27199 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
27200 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32_FDPIC
27201 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
27202 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32_FDPIC
27203 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
27204 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
27205 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
27206 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
27207 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
27208 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
27209 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
27210 return FALSE;
27211
27212 /* Similarly for group relocations. */
27213 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
27214 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
27215 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
27216 return FALSE;
27217
27218 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
27219 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
27220 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
27221 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
27222 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
27223 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
27224 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
27225 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
27226 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
27227 return FALSE;
27228
27229 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
27230 offsets, so keep these symbols. */
27231 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
27232 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
27233 return FALSE;
27234
27235 return TRUE;
27236 }
27237 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
27238
27239 #ifdef OBJ_ELF
27240 const char *
27241 elf32_arm_target_format (void)
27242 {
27243 #ifdef TE_SYMBIAN
27244 return (target_big_endian
27245 ? "elf32-bigarm-symbian"
27246 : "elf32-littlearm-symbian");
27247 #elif defined (TE_VXWORKS)
27248 return (target_big_endian
27249 ? "elf32-bigarm-vxworks"
27250 : "elf32-littlearm-vxworks");
27251 #elif defined (TE_NACL)
27252 return (target_big_endian
27253 ? "elf32-bigarm-nacl"
27254 : "elf32-littlearm-nacl");
27255 #else
27256 if (arm_fdpic)
27257 {
27258 if (target_big_endian)
27259 return "elf32-bigarm-fdpic";
27260 else
27261 return "elf32-littlearm-fdpic";
27262 }
27263 else
27264 {
27265 if (target_big_endian)
27266 return "elf32-bigarm";
27267 else
27268 return "elf32-littlearm";
27269 }
27270 #endif
27271 }
27272
void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  /* ARM ELF has no target-specific symbol frobbing; defer entirely to
     the generic ELF handler.  */
  elf_frob_symbol (symp, puntp);
}
27279 #endif
27280
27281 /* MD interface: Finalization. */
27282
void
arm_cleanup (void)
{
  literal_pool * pool;

  /* Ensure that all the predication blocks are properly closed.  */
  check_pred_blocks_finished ();

  /* Flush every literal pool that has not yet been emitted by an
     explicit .ltorg directive.  */
  for (pool = list_of_pools; pool; pool = pool->next)
    {
      /* Put it at the end of the relevant section.  */
      subseg_set (pool->section, pool->sub_section);
#ifdef OBJ_ELF
      arm_elf_change_section ();
#endif
      s_ltorg (0);
    }
}
27301
27302 #ifdef OBJ_ELF
27303 /* Remove any excess mapping symbols generated for alignment frags in
27304 SEC. We may have created a mapping symbol before a zero byte
27305 alignment; remove it if there's a mapping symbol after the
27306 alignment. */
/* bfd_map_over_sections callback: walk SEC's frag chain and delete
   mapping symbols ($a/$t/$d) that have become redundant, i.e. a
   symbol sitting at the very end of a frag that is immediately
   superseded by another mapping symbol, or that ends the section.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Sections with no frag chain (e.g. bfd-internal ones) need no work.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* Last mapping symbol emitted within this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary to the next frag; scan forward
	 to decide whether it is superseded or dangling.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
27367 #endif
27368
27369 /* Adjust the symbol table. This marks Thumb symbols as distinct from
27370 ARM ones. */
27371
/* Adjust the symbol table so Thumb symbols are distinguishable from ARM
   ones in the output: for COFF this rewrites storage classes and the
   interwork flag byte; for ELF it tags st_target_internal / st_info and
   then prunes redundant mapping symbols.  */
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking support is recorded in the COFF aux flag byte.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a/$t/$d etc.) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
27450
27451 /* MD interface: Initialization. */
27452
27453 static void
27454 set_constant_flonums (void)
27455 {
27456 int i;
27457
27458 for (i = 0; i < NUM_FLOAT_VALS; i++)
27459 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
27460 abort ();
27461 }
27462
27463 /* Auto-select Thumb mode if it's the only available instruction set for the
27464 given architecture. */
27465
27466 static void
27467 autoselect_thumb_from_cpu_variant (void)
27468 {
27469 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
27470 opcode_select (16);
27471 }
27472
/* MD interface: called once at assembler start-up.  Builds the hash
   tables used by the instruction parser, resolves the command-line
   CPU/architecture/FPU selection into cpu_variant, records object file
   private flags, creates the .arm.atpcs marker section if requested,
   and sets the BFD machine type.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_vcond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate the lookup tables from the static opcode/operand arrays.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (vconds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_vcond_hsh, vconds[i].template_name, (void *) (vconds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      selected_arch = *legacy_cpu;
    }
  else if (mcpu_cpu_opt)
    {
      selected_arch = *mcpu_cpu_opt;
      selected_ext = *mcpu_ext_opt;
    }
  else if (march_cpu_opt)
    {
      selected_arch = *march_cpu_opt;
      selected_ext = *march_ext_opt;
    }
  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      selected_fpu = *legacy_fpu;
    }
  else if (mfpu_opt)
    selected_fpu = *mfpu_opt;
  else
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	selected_fpu = *mcpu_fpu_opt;
      else if (march_fpu_opt)
	selected_fpu = *march_fpu_opt;
#else
      selected_fpu = fpu_default;
#endif
    }

  /* No FPU determined yet: fall back to the environment default, or to
     FPA when no CPU at all was selected.  */
  if (ARM_FEATURE_ZERO (selected_fpu))
    {
      if (!no_cpu_selected ())
	selected_fpu = fpu_default;
      else
	selected_fpu = fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (ARM_FEATURE_ZERO (selected_arch))
    {
      selected_arch = cpu_default;
      selected_cpu = selected_arch;
    }
  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#else
  /* Autodetection of feature mode: allow all features in cpu_variant but
     leave selected_cpu unset.  It will be set in
     aeabi_set_public_attributes () after all instructions have been
     processed and we can decide what CPU should be selected.  */
  if (ARM_FEATURE_ZERO (selected_arch))
    ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
  else
    ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#endif

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Ordered from most- to least-specific
     feature so the first match wins.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
27712
27713 /* Command line processing. */
27714
27715 /* md_parse_option
27716 Invocation line includes a switch not recognized by the base assembler.
27717 See if it's a processor-specific option.
27718
27719 This routine is somewhat complicated by the need for backwards
27720 compatibility (since older releases of gcc can't be changed).
27721 The new options try to make the interface as compatible as
27722 possible with GCC.
27723
27724 New options (supported) are:
27725
27726 -mcpu=<cpu name> Assemble for selected processor
27727 -march=<architecture name> Assemble for selected architecture
27728 -mfpu=<fpu architecture> Assemble for selected FPU.
27729 -EB/-mbig-endian Big-endian
27730 -EL/-mlittle-endian Little-endian
27731 -k Generate PIC code
27732 -mthumb Start in Thumb mode
27733 -mthumb-interwork Code supports ARM/Thumb interworking
27734
27735 -m[no-]warn-deprecated Warn about deprecated features
27736 -m[no-]warn-syms Warn when symbols match instructions
27737
27738 For now we will also provide support for:
27739
27740 -mapcs-32 32-bit Program counter
27741 -mapcs-26 26-bit Program counter
   -mapcs-float		  Floats passed in FP registers
27743 -mapcs-reentrant Reentrant code
27744 -matpcs
27745 (sometime these will probably be replaced with -mapcs=<list of options>
27746 and -matpcs=<list of options>)
27747
   The remaining options are only supported for backwards compatibility.
27749 Cpu variants, the arm part is optional:
27750 -m[arm]1 Currently not supported.
27751 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
27752 -m[arm]3 Arm 3 processor
27753 -m[arm]6[xx], Arm 6 processors
27754 -m[arm]7[xx][t][[d]m] Arm 7 processors
27755 -m[arm]8[10] Arm 8 processors
27756 -m[arm]9[20][tdmi] Arm 9 processors
27757 -mstrongarm[110[0]] StrongARM processors
27758 -mxscale XScale processors
27759 -m[arm]v[2345[t[e]]] Arm architectures
27760 -mall All (except the ARM1)
27761 FP variants:
27762 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
27763 -mfpe-old (No float load/store multiples)
27764 -mvfpxd VFP Single precision
27765 -mvfp All VFP
27766 -mno-fpu Disable all floating point instructions
27767
27768 The following CPU names are recognized:
27769 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
27770 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
27771 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
27772 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
27773 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
27774 arm10t arm10e, arm1020t, arm1020e, arm10200e,
27775 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
27776
27777 */
27778
/* Single-character options accepted by the target: -m<arg> and -k.  */
const char * md_shortopts = "m:k";

/* Long-option ids, allocated from the target-private option space.
   OPTION_EB/OPTION_EL are only defined for the endiannesses the
   configured target can actually produce.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
#define OPTION_FDPIC (OPTION_MD_BASE + 3)

/* Long options understood by the target, handled in md_parse_option.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
#ifdef OBJ_ELF
  {"fdpic", no_argument, NULL, OPTION_FDPIC},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
27810
/* Description of a simple on/off command-line option that just stores
   an integer VALUE into *VAR when seen.  */
struct arm_option_table
{
  const char *	option;		/* Option name to match.  */
  const char *	help;		/* Help information.  */
  int *		var;		/* Variable to change.	*/
  int		value;		/* What to change it to.  */
  const char *	deprecated;	/* If non-null, print this message.  */
};
27819
27820 struct arm_option_table arm_opts[] =
27821 {
27822 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
27823 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
27824 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
27825 &support_interwork, 1, NULL},
27826 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
27827 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
27828 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
27829 1, NULL},
27830 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
27831 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
27832 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
27833 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
27834 NULL},
27835
27836 /* These are recognized by the assembler, but have no affect on code. */
27837 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
27838 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
27839
27840 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
27841 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
27842 &warn_on_deprecated, 0, NULL},
27843 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
27844 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
27845 {NULL, NULL, NULL, 0, NULL}
27846 };
27847
/* Description of a deprecated CPU/FPU selection option: matching the
   option stores the feature-set VALUE through *VAR and prints the
   DEPRECATED message suggesting the modern -mcpu=/-march=/-mfpu= form.  */
struct arm_legacy_option_table
{
  const char *		    option;	/* Option name to match.  */
  const arm_feature_set	**  var;	/* Variable to change.	*/
  const arm_feature_set	    value;	/* What to change it to.  */
  const char *		    deprecated;	/* If non-null, print this message.  */
};
27855
/* Closed table of legacy -m<cpu>/-m<arch>/-m<fpu> options kept only for
   backwards compatibility; each entry maps to a feature set and a
   deprecation message naming the modern equivalent option.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
27968
/* Entry in the -mcpu= table: a CPU name (with pre-computed length), the
   architectural feature set it implies, any implicit extensions, the
   FPU assumed in the absence of -mfpu=, and an optional canonical
   spelling of the CPU name.  */
struct arm_cpu_option_table
{
  const char *	name;
  size_t	name_len;
  const arm_feature_set	value;
  const arm_feature_set	ext;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *	canonical_name;
};
27982
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* Convenience wrapper that computes name_len at compile time and maps
   the (name, canonical, value, ext, default_fpu) arguments onto the
   field order of struct arm_cpu_option_table.  */
#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
27986
27987 static const struct arm_cpu_option_table arm_cpus[] =
27988 {
27989 ARM_CPU_OPT ("all", NULL, ARM_ANY,
27990 ARM_ARCH_NONE,
27991 FPU_ARCH_FPA),
27992 ARM_CPU_OPT ("arm1", NULL, ARM_ARCH_V1,
27993 ARM_ARCH_NONE,
27994 FPU_ARCH_FPA),
27995 ARM_CPU_OPT ("arm2", NULL, ARM_ARCH_V2,
27996 ARM_ARCH_NONE,
27997 FPU_ARCH_FPA),
27998 ARM_CPU_OPT ("arm250", NULL, ARM_ARCH_V2S,
27999 ARM_ARCH_NONE,
28000 FPU_ARCH_FPA),
28001 ARM_CPU_OPT ("arm3", NULL, ARM_ARCH_V2S,
28002 ARM_ARCH_NONE,
28003 FPU_ARCH_FPA),
28004 ARM_CPU_OPT ("arm6", NULL, ARM_ARCH_V3,
28005 ARM_ARCH_NONE,
28006 FPU_ARCH_FPA),
28007 ARM_CPU_OPT ("arm60", NULL, ARM_ARCH_V3,
28008 ARM_ARCH_NONE,
28009 FPU_ARCH_FPA),
28010 ARM_CPU_OPT ("arm600", NULL, ARM_ARCH_V3,
28011 ARM_ARCH_NONE,
28012 FPU_ARCH_FPA),
28013 ARM_CPU_OPT ("arm610", NULL, ARM_ARCH_V3,
28014 ARM_ARCH_NONE,
28015 FPU_ARCH_FPA),
28016 ARM_CPU_OPT ("arm620", NULL, ARM_ARCH_V3,
28017 ARM_ARCH_NONE,
28018 FPU_ARCH_FPA),
28019 ARM_CPU_OPT ("arm7", NULL, ARM_ARCH_V3,
28020 ARM_ARCH_NONE,
28021 FPU_ARCH_FPA),
28022 ARM_CPU_OPT ("arm7m", NULL, ARM_ARCH_V3M,
28023 ARM_ARCH_NONE,
28024 FPU_ARCH_FPA),
28025 ARM_CPU_OPT ("arm7d", NULL, ARM_ARCH_V3,
28026 ARM_ARCH_NONE,
28027 FPU_ARCH_FPA),
28028 ARM_CPU_OPT ("arm7dm", NULL, ARM_ARCH_V3M,
28029 ARM_ARCH_NONE,
28030 FPU_ARCH_FPA),
28031 ARM_CPU_OPT ("arm7di", NULL, ARM_ARCH_V3,
28032 ARM_ARCH_NONE,
28033 FPU_ARCH_FPA),
28034 ARM_CPU_OPT ("arm7dmi", NULL, ARM_ARCH_V3M,
28035 ARM_ARCH_NONE,
28036 FPU_ARCH_FPA),
28037 ARM_CPU_OPT ("arm70", NULL, ARM_ARCH_V3,
28038 ARM_ARCH_NONE,
28039 FPU_ARCH_FPA),
28040 ARM_CPU_OPT ("arm700", NULL, ARM_ARCH_V3,
28041 ARM_ARCH_NONE,
28042 FPU_ARCH_FPA),
28043 ARM_CPU_OPT ("arm700i", NULL, ARM_ARCH_V3,
28044 ARM_ARCH_NONE,
28045 FPU_ARCH_FPA),
28046 ARM_CPU_OPT ("arm710", NULL, ARM_ARCH_V3,
28047 ARM_ARCH_NONE,
28048 FPU_ARCH_FPA),
28049 ARM_CPU_OPT ("arm710t", NULL, ARM_ARCH_V4T,
28050 ARM_ARCH_NONE,
28051 FPU_ARCH_FPA),
28052 ARM_CPU_OPT ("arm720", NULL, ARM_ARCH_V3,
28053 ARM_ARCH_NONE,
28054 FPU_ARCH_FPA),
28055 ARM_CPU_OPT ("arm720t", NULL, ARM_ARCH_V4T,
28056 ARM_ARCH_NONE,
28057 FPU_ARCH_FPA),
28058 ARM_CPU_OPT ("arm740t", NULL, ARM_ARCH_V4T,
28059 ARM_ARCH_NONE,
28060 FPU_ARCH_FPA),
28061 ARM_CPU_OPT ("arm710c", NULL, ARM_ARCH_V3,
28062 ARM_ARCH_NONE,
28063 FPU_ARCH_FPA),
28064 ARM_CPU_OPT ("arm7100", NULL, ARM_ARCH_V3,
28065 ARM_ARCH_NONE,
28066 FPU_ARCH_FPA),
28067 ARM_CPU_OPT ("arm7500", NULL, ARM_ARCH_V3,
28068 ARM_ARCH_NONE,
28069 FPU_ARCH_FPA),
28070 ARM_CPU_OPT ("arm7500fe", NULL, ARM_ARCH_V3,
28071 ARM_ARCH_NONE,
28072 FPU_ARCH_FPA),
28073 ARM_CPU_OPT ("arm7t", NULL, ARM_ARCH_V4T,
28074 ARM_ARCH_NONE,
28075 FPU_ARCH_FPA),
28076 ARM_CPU_OPT ("arm7tdmi", NULL, ARM_ARCH_V4T,
28077 ARM_ARCH_NONE,
28078 FPU_ARCH_FPA),
28079 ARM_CPU_OPT ("arm7tdmi-s", NULL, ARM_ARCH_V4T,
28080 ARM_ARCH_NONE,
28081 FPU_ARCH_FPA),
28082 ARM_CPU_OPT ("arm8", NULL, ARM_ARCH_V4,
28083 ARM_ARCH_NONE,
28084 FPU_ARCH_FPA),
28085 ARM_CPU_OPT ("arm810", NULL, ARM_ARCH_V4,
28086 ARM_ARCH_NONE,
28087 FPU_ARCH_FPA),
28088 ARM_CPU_OPT ("strongarm", NULL, ARM_ARCH_V4,
28089 ARM_ARCH_NONE,
28090 FPU_ARCH_FPA),
28091 ARM_CPU_OPT ("strongarm1", NULL, ARM_ARCH_V4,
28092 ARM_ARCH_NONE,
28093 FPU_ARCH_FPA),
28094 ARM_CPU_OPT ("strongarm110", NULL, ARM_ARCH_V4,
28095 ARM_ARCH_NONE,
28096 FPU_ARCH_FPA),
28097 ARM_CPU_OPT ("strongarm1100", NULL, ARM_ARCH_V4,
28098 ARM_ARCH_NONE,
28099 FPU_ARCH_FPA),
28100 ARM_CPU_OPT ("strongarm1110", NULL, ARM_ARCH_V4,
28101 ARM_ARCH_NONE,
28102 FPU_ARCH_FPA),
28103 ARM_CPU_OPT ("arm9", NULL, ARM_ARCH_V4T,
28104 ARM_ARCH_NONE,
28105 FPU_ARCH_FPA),
28106 ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T,
28107 ARM_ARCH_NONE,
28108 FPU_ARCH_FPA),
28109 ARM_CPU_OPT ("arm920t", NULL, ARM_ARCH_V4T,
28110 ARM_ARCH_NONE,
28111 FPU_ARCH_FPA),
28112 ARM_CPU_OPT ("arm922t", NULL, ARM_ARCH_V4T,
28113 ARM_ARCH_NONE,
28114 FPU_ARCH_FPA),
28115 ARM_CPU_OPT ("arm940t", NULL, ARM_ARCH_V4T,
28116 ARM_ARCH_NONE,
28117 FPU_ARCH_FPA),
28118 ARM_CPU_OPT ("arm9tdmi", NULL, ARM_ARCH_V4T,
28119 ARM_ARCH_NONE,
28120 FPU_ARCH_FPA),
28121 ARM_CPU_OPT ("fa526", NULL, ARM_ARCH_V4,
28122 ARM_ARCH_NONE,
28123 FPU_ARCH_FPA),
28124 ARM_CPU_OPT ("fa626", NULL, ARM_ARCH_V4,
28125 ARM_ARCH_NONE,
28126 FPU_ARCH_FPA),
28127
28128 /* For V5 or later processors we default to using VFP; but the user
28129 should really set the FPU type explicitly. */
28130 ARM_CPU_OPT ("arm9e-r0", NULL, ARM_ARCH_V5TExP,
28131 ARM_ARCH_NONE,
28132 FPU_ARCH_VFP_V2),
28133 ARM_CPU_OPT ("arm9e", NULL, ARM_ARCH_V5TE,
28134 ARM_ARCH_NONE,
28135 FPU_ARCH_VFP_V2),
28136 ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ,
28137 ARM_ARCH_NONE,
28138 FPU_ARCH_VFP_V2),
28139 ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ,
28140 ARM_ARCH_NONE,
28141 FPU_ARCH_VFP_V2),
28142 ARM_CPU_OPT ("arm926ej-s", NULL, ARM_ARCH_V5TEJ,
28143 ARM_ARCH_NONE,
28144 FPU_ARCH_VFP_V2),
28145 ARM_CPU_OPT ("arm946e-r0", NULL, ARM_ARCH_V5TExP,
28146 ARM_ARCH_NONE,
28147 FPU_ARCH_VFP_V2),
28148 ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE,
28149 ARM_ARCH_NONE,
28150 FPU_ARCH_VFP_V2),
28151 ARM_CPU_OPT ("arm946e-s", NULL, ARM_ARCH_V5TE,
28152 ARM_ARCH_NONE,
28153 FPU_ARCH_VFP_V2),
28154 ARM_CPU_OPT ("arm966e-r0", NULL, ARM_ARCH_V5TExP,
28155 ARM_ARCH_NONE,
28156 FPU_ARCH_VFP_V2),
28157 ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE,
28158 ARM_ARCH_NONE,
28159 FPU_ARCH_VFP_V2),
28160 ARM_CPU_OPT ("arm966e-s", NULL, ARM_ARCH_V5TE,
28161 ARM_ARCH_NONE,
28162 FPU_ARCH_VFP_V2),
28163 ARM_CPU_OPT ("arm968e-s", NULL, ARM_ARCH_V5TE,
28164 ARM_ARCH_NONE,
28165 FPU_ARCH_VFP_V2),
28166 ARM_CPU_OPT ("arm10t", NULL, ARM_ARCH_V5T,
28167 ARM_ARCH_NONE,
28168 FPU_ARCH_VFP_V1),
28169 ARM_CPU_OPT ("arm10tdmi", NULL, ARM_ARCH_V5T,
28170 ARM_ARCH_NONE,
28171 FPU_ARCH_VFP_V1),
28172 ARM_CPU_OPT ("arm10e", NULL, ARM_ARCH_V5TE,
28173 ARM_ARCH_NONE,
28174 FPU_ARCH_VFP_V2),
28175 ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE,
28176 ARM_ARCH_NONE,
28177 FPU_ARCH_VFP_V2),
28178 ARM_CPU_OPT ("arm1020t", NULL, ARM_ARCH_V5T,
28179 ARM_ARCH_NONE,
28180 FPU_ARCH_VFP_V1),
28181 ARM_CPU_OPT ("arm1020e", NULL, ARM_ARCH_V5TE,
28182 ARM_ARCH_NONE,
28183 FPU_ARCH_VFP_V2),
28184 ARM_CPU_OPT ("arm1022e", NULL, ARM_ARCH_V5TE,
28185 ARM_ARCH_NONE,
28186 FPU_ARCH_VFP_V2),
28187 ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ,
28188 ARM_ARCH_NONE,
28189 FPU_ARCH_VFP_V2),
28190 ARM_CPU_OPT ("arm1026ej-s", NULL, ARM_ARCH_V5TEJ,
28191 ARM_ARCH_NONE,
28192 FPU_ARCH_VFP_V2),
28193 ARM_CPU_OPT ("fa606te", NULL, ARM_ARCH_V5TE,
28194 ARM_ARCH_NONE,
28195 FPU_ARCH_VFP_V2),
28196 ARM_CPU_OPT ("fa616te", NULL, ARM_ARCH_V5TE,
28197 ARM_ARCH_NONE,
28198 FPU_ARCH_VFP_V2),
28199 ARM_CPU_OPT ("fa626te", NULL, ARM_ARCH_V5TE,
28200 ARM_ARCH_NONE,
28201 FPU_ARCH_VFP_V2),
28202 ARM_CPU_OPT ("fmp626", NULL, ARM_ARCH_V5TE,
28203 ARM_ARCH_NONE,
28204 FPU_ARCH_VFP_V2),
28205 ARM_CPU_OPT ("fa726te", NULL, ARM_ARCH_V5TE,
28206 ARM_ARCH_NONE,
28207 FPU_ARCH_VFP_V2),
28208 ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6,
28209 ARM_ARCH_NONE,
28210 FPU_NONE),
28211 ARM_CPU_OPT ("arm1136j-s", NULL, ARM_ARCH_V6,
28212 ARM_ARCH_NONE,
28213 FPU_NONE),
28214 ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6,
28215 ARM_ARCH_NONE,
28216 FPU_ARCH_VFP_V2),
28217 ARM_CPU_OPT ("arm1136jf-s", NULL, ARM_ARCH_V6,
28218 ARM_ARCH_NONE,
28219 FPU_ARCH_VFP_V2),
28220 ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K,
28221 ARM_ARCH_NONE,
28222 FPU_ARCH_VFP_V2),
28223 ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K,
28224 ARM_ARCH_NONE,
28225 FPU_NONE),
28226 ARM_CPU_OPT ("arm1156t2-s", NULL, ARM_ARCH_V6T2,
28227 ARM_ARCH_NONE,
28228 FPU_NONE),
28229 ARM_CPU_OPT ("arm1156t2f-s", NULL, ARM_ARCH_V6T2,
28230 ARM_ARCH_NONE,
28231 FPU_ARCH_VFP_V2),
28232 ARM_CPU_OPT ("arm1176jz-s", NULL, ARM_ARCH_V6KZ,
28233 ARM_ARCH_NONE,
28234 FPU_NONE),
28235 ARM_CPU_OPT ("arm1176jzf-s", NULL, ARM_ARCH_V6KZ,
28236 ARM_ARCH_NONE,
28237 FPU_ARCH_VFP_V2),
28238 ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A,
28239 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
28240 FPU_NONE),
28241 ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE,
28242 ARM_ARCH_NONE,
28243 FPU_ARCH_NEON_VFP_V4),
28244 ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A,
28245 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
28246 ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
28247 ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A,
28248 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
28249 ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
28250 ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE,
28251 ARM_ARCH_NONE,
28252 FPU_ARCH_NEON_VFP_V4),
28253 ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE,
28254 ARM_ARCH_NONE,
28255 FPU_ARCH_NEON_VFP_V4),
28256 ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE,
28257 ARM_ARCH_NONE,
28258 FPU_ARCH_NEON_VFP_V4),
28259 ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A,
28260 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28261 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
28262 ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A,
28263 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28264 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
28265 ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A,
28266 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28267 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
28268 ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A,
28269 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
28270 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
28271 ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A,
28272 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28273 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
28274 ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A,
28275 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28276 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
28277 ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A,
28278 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28279 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
28280 ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A,
28281 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
28282 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
28283 ARM_CPU_OPT ("cortex-a76", "Cortex-A76", ARM_ARCH_V8_2A,
28284 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
28285 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
28286 ARM_CPU_OPT ("ares", "Ares", ARM_ARCH_V8_2A,
28287 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
28288 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
28289 ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R,
28290 ARM_ARCH_NONE,
28291 FPU_NONE),
28292 ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R,
28293 ARM_ARCH_NONE,
28294 FPU_ARCH_VFP_V3D16),
28295 ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R,
28296 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
28297 FPU_NONE),
28298 ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R,
28299 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
28300 FPU_ARCH_VFP_V3D16),
28301 ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R,
28302 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
28303 FPU_ARCH_VFP_V3D16),
28304 ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R,
28305 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28306 FPU_ARCH_NEON_VFP_ARMV8),
28307 ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN,
28308 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
28309 FPU_NONE),
28310 ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE,
28311 ARM_ARCH_NONE,
28312 FPU_NONE),
28313 ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM,
28314 ARM_ARCH_NONE,
28315 FPU_NONE),
28316 ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM,
28317 ARM_ARCH_NONE,
28318 FPU_NONE),
28319 ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M,
28320 ARM_ARCH_NONE,
28321 FPU_NONE),
28322 ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM,
28323 ARM_ARCH_NONE,
28324 FPU_NONE),
28325 ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM,
28326 ARM_ARCH_NONE,
28327 FPU_NONE),
28328 ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM,
28329 ARM_ARCH_NONE,
28330 FPU_NONE),
28331 ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A,
28332 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28333 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
28334 ARM_CPU_OPT ("neoverse-n1", "Neoverse N1", ARM_ARCH_V8_2A,
28335 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
28336 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
28337 /* ??? XSCALE is really an architecture. */
28338 ARM_CPU_OPT ("xscale", NULL, ARM_ARCH_XSCALE,
28339 ARM_ARCH_NONE,
28340 FPU_ARCH_VFP_V2),
28341
28342 /* ??? iwmmxt is not a processor. */
28343 ARM_CPU_OPT ("iwmmxt", NULL, ARM_ARCH_IWMMXT,
28344 ARM_ARCH_NONE,
28345 FPU_ARCH_VFP_V2),
28346 ARM_CPU_OPT ("iwmmxt2", NULL, ARM_ARCH_IWMMXT2,
28347 ARM_ARCH_NONE,
28348 FPU_ARCH_VFP_V2),
28349 ARM_CPU_OPT ("i80200", NULL, ARM_ARCH_XSCALE,
28350 ARM_ARCH_NONE,
28351 FPU_ARCH_VFP_V2),
28352
28353 /* Maverick. */
28354 ARM_CPU_OPT ("ep9312", "ARM920T",
28355 ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
28356 ARM_ARCH_NONE, FPU_ARCH_MAVERICK),
28357
28358 /* Marvell processors. */
28359 ARM_CPU_OPT ("marvell-pj4", NULL, ARM_ARCH_V7A,
28360 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
28361 FPU_ARCH_VFP_V3D16),
28362 ARM_CPU_OPT ("marvell-whitney", NULL, ARM_ARCH_V7A,
28363 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
28364 FPU_ARCH_NEON_VFP_V4),
28365
28366 /* APM X-Gene family. */
28367 ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A,
28368 ARM_ARCH_NONE,
28369 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
28370 ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A,
28371 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28372 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
28373
28374 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
28375 };
28376 #undef ARM_CPU_OPT
28377
/* One architecture-specific extension: MERGE holds the feature bits that
   "+NAME" enables, CLEAR holds the bits that "+noNAME" disables.  Used by
   arm_parse_extension when an architecture supplies a context-sensitive
   extension table.  */
28378 struct arm_ext_table
28379 {
28380 const char * name;	/* Extension name without the leading '+'.  */
28381 size_t name_len;	/* strlen (name), precomputed for matching.  */
28382 const arm_feature_set merge;	/* Features added by +NAME.  */
28383 const arm_feature_set clear;	/* Features removed by +noNAME.  */
28384 };
28385
/* One -march= option: the architecture name, its feature set, the FPU
   assumed when none is given explicitly, and an optional table of
   extensions valid for that architecture (NULL when the legacy global
   arm_extensions table applies).  */
28386 struct arm_arch_option_table
28387 {
28388 const char * name;	/* Architecture name as given to -march=.  */
28389 size_t name_len;	/* strlen (name), precomputed for matching.  */
28390 const arm_feature_set value;	/* Core feature bits for this arch.  */
28391 const arm_feature_set default_fpu;	/* FPU used if none specified.  */
28392 const struct arm_ext_table * ext_table;	/* Per-arch +ext table, may be NULL.  */
28393 };
28394
28395 /* Used to add support for +E and +noE extension. */
28396 #define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
28397 /* Used to add support for a +E extension. */
28398 #define ARM_ADD(E, M) { E, sizeof (E) - 1, M, ARM_ARCH_NONE }
28399 /* Used to add support for a +noE extension. */
28400 #define ARM_REMOVE(E, C) { E, sizeof (E) - 1, ARM_ARCH_NONE, C }
28401
28402 /* Mask covering every FP-related feature bit; used as the CLEAR set so
28403    that "+nofp" removes all floating-point support.  */
28404 #define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
28405 ~0 & ~FPU_ENDIAN_PURE)
28404
/* Extensions accepted for -march=armv5te..armv6z* (shared table).  */
28405 static const struct arm_ext_table armv5te_ext_table[] =
28406 {
28407 ARM_EXT ("fp", FPU_ARCH_VFP_V2, ALL_FP),
28408 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
28409 };
28410
/* Extensions accepted for -march=armv7.  */
28411 static const struct arm_ext_table armv7_ext_table[] =
28412 {
28413 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
28414 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
28415 };
28416
/* Extensions accepted for -march=armv7ve.  */
28417 static const struct arm_ext_table armv7ve_ext_table[] =
28418 {
28419 ARM_EXT ("fp", FPU_ARCH_VFP_V4D16, ALL_FP),
28420 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),
28421 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
28422 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
28423 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
28424 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16), /* Alias for +fp. */
28425 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),
28426
28427 ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4,
28428 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),
28429
28430 /* Aliases for +simd. */
28431 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),
28432
28433 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
28434 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
28435 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
28436
28437 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
28438 };
28439
/* Extensions accepted for -march=armv7-a (and the legacy "armv7a").  */
28440 static const struct arm_ext_table armv7a_ext_table[] =
28441 {
28442 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
28443 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp. */
28444 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
28445 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
28446 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
28447 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),
28448 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),
28449
28450 ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1,
28451 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),
28452
28453 /* Aliases for +simd. */
28454 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
28455 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
28456
28457 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
28458 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),
28459
28460 ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP)),
28461 ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC)),
28462 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
28463 };
28464
/* Extensions accepted for -march=armv7-r (and the legacy "armv7r").  */
28465 static const struct arm_ext_table armv7r_ext_table[] =
28466 {
28467 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD),
28468 ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD), /* Alias for +fp.sp. */
28469 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
28470 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp. */
28471 ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16),
28472 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
28473 ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
28474 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV)),
28475 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
28476 };
28477
/* Extensions accepted for -march=armv7e-m.  */
28478 static const struct arm_ext_table armv7em_ext_table[] =
28479 {
28480 ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16, ALL_FP),
28481 /* Alias for +fp, used to be known as fpv4-sp-d16. */
28482 ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16),
28483 ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16),
28484 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
28485 ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16),
28486 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
28487 };
28488
/* Extensions accepted for -march=armv8-a.  */
28489 static const struct arm_ext_table armv8a_ext_table[] =
28490 {
28491 ARM_ADD ("crc", ARCH_CRC_ARMV8),
28492 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
28493 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
28494 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
28495
28496 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28497 should use the +simd option to turn on FP. */
28498 ARM_REMOVE ("fp", ALL_FP),
28499 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
28500 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
28501 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
28502 };
28503
28504
/* Extensions accepted for -march=armv8.1-a.  */
28505 static const struct arm_ext_table armv81a_ext_table[] =
28506 {
28507 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
28508 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
28509 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
28510
28511 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28512 should use the +simd option to turn on FP. */
28513 ARM_REMOVE ("fp", ALL_FP),
28514 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
28515 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
28516 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
28517 };
28518
/* Extensions accepted for -march=armv8.2-a (also reused for armv8.3-a).  */
28519 static const struct arm_ext_table armv82a_ext_table[] =
28520 {
28521 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
28522 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16),
28523 ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML),
28524 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
28525 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
28526 ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
28527
28528 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28529 should use the +simd option to turn on FP. */
28530 ARM_REMOVE ("fp", ALL_FP),
28531 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
28532 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
28533 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
28534 };
28535
/* Extensions accepted for -march=armv8.4-a.  */
28536 static const struct arm_ext_table armv84a_ext_table[] =
28537 {
28538 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
28539 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
28540 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
28541 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
28542
28543 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28544 should use the +simd option to turn on FP. */
28545 ARM_REMOVE ("fp", ALL_FP),
28546 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
28547 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
28548 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
28549 };
28550
/* Extensions accepted for -march=armv8.5-a.  Note: +sb and +predres are
   not listed here; in Armv8.5-A they are part of the base architecture.  */
28551 static const struct arm_ext_table armv85a_ext_table[] =
28552 {
28553 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
28554 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
28555 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
28556 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
28557
28558 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28559 should use the +simd option to turn on FP. */
28560 ARM_REMOVE ("fp", ALL_FP),
28561 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
28562 };
28563
/* Extensions accepted for -march=armv8-m.main.  */
28564 static const struct arm_ext_table armv8m_main_ext_table[] =
28565 {
28566 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
28567 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
28568 ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16, ALL_FP),
28569 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
28570 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
28571 };
28572
/* Extensions accepted for -march=armv8.1-m.main.  +fp here implies
   half-precision instructions; +mve adds integer MVE only, while +mve.fp
   adds the floating-point MVE variant together with scalar FP support.
   "+nomve" clears both FPU_MVE and FPU_MVE_FP.  */
28573 static const struct arm_ext_table armv8_1m_main_ext_table[] =
28574 {
28575 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
28576 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
28577 ARM_EXT ("fp",
28578 ARM_FEATURE (0, ARM_EXT2_FP16_INST,
28579 FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA),
28580 ALL_FP),
28581 ARM_ADD ("fp.dp",
28582 ARM_FEATURE (0, ARM_EXT2_FP16_INST,
28583 FPU_VFP_V5D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
28584 ARM_EXT ("mve", ARM_FEATURE_COPROC (FPU_MVE),
28585 ARM_FEATURE_COPROC (FPU_MVE | FPU_MVE_FP)),
28586 ARM_ADD ("mve.fp",
28587 ARM_FEATURE (0, ARM_EXT2_FP16_INST,
28588 FPU_MVE | FPU_MVE_FP | FPU_VFP_V5_SP_D16 |
28589 FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
28590 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
28591 };
28592
/* Extensions accepted for -march=armv8-r.  */
28593 static const struct arm_ext_table armv8r_ext_table[] =
28594 {
28595 ARM_ADD ("crc", ARCH_CRC_ARMV8),
28596 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
28597 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
28598 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
28599 ARM_REMOVE ("fp", ALL_FP),
28600 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16),
28601 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
28602 };
28603
28604 /* This list should, at a minimum, contain all the architecture names
28605 recognized by GCC. */
/* Entry without a context-sensitive extension table.  */
28606 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
/* Entry whose extensions are parsed against ext##_ext_table.  */
28607 #define ARM_ARCH_OPT2(N, V, DF, ext) \
28608 { N, sizeof (N) - 1, V, DF, ext##_ext_table }
28609
/* Table of all -march= values.  Entries created with ARM_ARCH_OPT2 carry a
   per-architecture extension table; the rest fall back to the legacy
   arm_extensions table.  */
28610 static const struct arm_arch_option_table arm_archs[] =
28611 {
28612 ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
28613 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
28614 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
28615 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
28616 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
28617 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
28618 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
28619 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
28620 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
28621 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
28622 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
28623 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
28624 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
28625 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
28626 ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP, armv5te),
28627 ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP, armv5te),
28628 ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP, armv5te),
28629 ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
28630 ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
28631 ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP, armv5te),
28632 ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP, armv5te),
28633 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
28634 kept to preserve existing behaviour. */
28635 ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
28636 ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
28637 ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP, armv5te),
28638 ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP, armv5te),
28639 ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP, armv5te),
28640 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
28641 kept to preserve existing behaviour. */
28642 ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
28643 ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
28644 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
28645 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
28646 ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP, armv7),
28647 /* The official spelling of the ARMv7 profile variants is the dashed form.
28648 Accept the non-dashed form for compatibility with old toolchains. */
28649 ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
28650 ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP, armv7ve),
28651 ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
28652 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
28653 ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
28654 ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
28655 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
28656 ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP, armv7em),
28657 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
28658 ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP,
28659 armv8m_main),
28660 ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN, FPU_ARCH_VFP,
28661 armv8_1m_main),
28662 ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP, armv8a),
28663 ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP, armv81a),
28664 ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP, armv82a),
/* armv8.3-a deliberately reuses the v8.2 extension table.  */
28665 ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A, FPU_ARCH_VFP, armv82a),
28666 ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R, FPU_ARCH_VFP, armv8r),
28667 ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A, FPU_ARCH_VFP, armv84a),
28668 ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A, FPU_ARCH_VFP, armv85a),
28669 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
28670 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
28671 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2, FPU_ARCH_VFP),
28672 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }	/* Sentinel.  */
28673 };
28674 #undef ARM_ARCH_OPT
28675
28676 /* ISA extensions in the co-processor and main instruction set space. */
28677
28678 struct arm_option_extension_value_table
28679 {
28680 const char * name;	/* Extension name without the leading '+'.  */
28681 size_t name_len;	/* strlen (name), precomputed for matching.  */
28682 const arm_feature_set merge_value;	/* Features added by +NAME.  */
28683 const arm_feature_set clear_value;	/* Features removed by +noNAME.  */
28684 /* List of architectures for which an extension is available. ARM_ARCH_NONE
28685 indicates that an extension is available for all architectures while
28686 ARM_ANY marks an empty entry. */
28687 const arm_feature_set allowed_archs[2];
28688 };
28689
28690 /* The following table must be in alphabetical order with a NULL last entry. */
28691
/* Extension valid for one architecture (AA); second slot left empty.  */
28692 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
/* Extension valid for either of two architectures (AA1, AA2).  */
28693 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
28694
28695 /* DEPRECATED: Refrain from using this table to add any new extensions, instead
28696 use the context sensitive approach using arm_ext_table's. */
/* Legacy global +extension table, kept in alphabetical order (required by
   arm_parse_extension's ordering check) and terminated by a NULL entry.  */
28697 static const struct arm_option_extension_value_table arm_extensions[] =
28698 {
28699 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28700 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
28701 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
28702 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
28703 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
28704 ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
28705 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
28706 ARM_ARCH_V8_2A),
28707 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
28708 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
28709 ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
28710 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
28711 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
28712 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
28713 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
28714 ARM_ARCH_V8_2A),
28715 ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
28716 | ARM_EXT2_FP16_FML),
28717 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
28718 | ARM_EXT2_FP16_FML),
28719 ARM_ARCH_V8_2A),
28720 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
28721 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
28722 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
28723 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
28724 /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
28725 Thumb divide instruction. Due to this having the same name as the
28726 previous entry, this will be ignored when doing command-line parsing and
28727 only considered by build attribute selection code. */
28728 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
28729 ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
28730 ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
28731 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
28732 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
28733 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
28734 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
28735 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
28736 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
28737 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
28738 ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
28739 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
28740 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
28741 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
28742 ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
28743 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
28744 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
28745 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
28746 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
28747 ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
28748 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
28749 ARM_ARCH_V8A),
28750 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
28751 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
28752 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
28753 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1,
28754 ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
28755 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
28756 ARM_EXT_OPT ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
28757 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
28758 ARM_ARCH_V8A),
28759 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
28760 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
28761 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
28762 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
28763 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
28764 ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
28765 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
28766 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
28767 | ARM_EXT_DIV),
28768 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
28769 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
28770 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
28771 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
28772 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }	/* Sentinel.  */
28773 };
28774 #undef ARM_EXT_OPT
28775
28776 /* ISA floating-point and Advanced SIMD extensions. */
28777 struct arm_option_fpu_value_table
28778 {
28779 const char * name;	/* FPU name as given to -mfpu=.  */
28780 const arm_feature_set value;	/* Feature bits enabled by this FPU.  */
28781 };
28782
28783 /* This list should, at a minimum, contain all the fpu names
28784 recognized by GCC. */
/* Table of all -mfpu= values, terminated by a NULL entry.  Several names
   are CPU names or historical spellings kept for compatibility.  */
28785 static const struct arm_option_fpu_value_table arm_fpus[] =
28786 {
28787 {"softfpa", FPU_NONE},
28788 {"fpe", FPU_ARCH_FPE},
28789 {"fpe2", FPU_ARCH_FPE},
28790 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
28791 {"fpa", FPU_ARCH_FPA},
28792 {"fpa10", FPU_ARCH_FPA},
28793 {"fpa11", FPU_ARCH_FPA},
28794 {"arm7500fe", FPU_ARCH_FPA},
28795 {"softvfp", FPU_ARCH_VFP},
28796 {"softvfp+vfp", FPU_ARCH_VFP_V2},
28797 {"vfp", FPU_ARCH_VFP_V2},
28798 {"vfp9", FPU_ARCH_VFP_V2},
28799 {"vfp3", FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3. */
28800 {"vfp10", FPU_ARCH_VFP_V2},
28801 {"vfp10-r0", FPU_ARCH_VFP_V1},
28802 {"vfpxd", FPU_ARCH_VFP_V1xD},
28803 {"vfpv2", FPU_ARCH_VFP_V2},
28804 {"vfpv3", FPU_ARCH_VFP_V3},
28805 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16},
28806 {"vfpv3-d16", FPU_ARCH_VFP_V3D16},
28807 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16},
28808 {"vfpv3xd", FPU_ARCH_VFP_V3xD},
28809 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16},
28810 {"arm1020t", FPU_ARCH_VFP_V1},
28811 {"arm1020e", FPU_ARCH_VFP_V2},
28812 {"arm1136jfs", FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s. */
28813 {"arm1136jf-s", FPU_ARCH_VFP_V2},
28814 {"maverick", FPU_ARCH_MAVERICK},
28815 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
28816 {"neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
28817 {"neon-fp16", FPU_ARCH_NEON_FP16},
28818 {"vfpv4", FPU_ARCH_VFP_V4},
28819 {"vfpv4-d16", FPU_ARCH_VFP_V4D16},
28820 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16},
28821 {"fpv5-d16", FPU_ARCH_VFP_V5D16},
28822 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16},
28823 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4},
28824 {"fp-armv8", FPU_ARCH_VFP_ARMV8},
28825 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8},
28826 {"crypto-neon-fp-armv8",
28827 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
28828 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1},
28829 {"crypto-neon-fp-armv8.1",
28830 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
28831 {NULL, ARM_ARCH_NONE}	/* Sentinel.  */
28832 };
28833
/* Generic name -> integer-value mapping used by simple option tables
   (float ABI, EABI version).  */
28834 struct arm_option_value_table
28835 {
28836 const char *name;	/* Option value name.  */
28837 long value;	/* Corresponding numeric constant.  */
28838 };
28839
/* Recognized -mfloat-abi= values, terminated by a NULL entry.  */
28840 static const struct arm_option_value_table arm_float_abis[] =
28841 {
28842 {"hard", ARM_FLOAT_ABI_HARD},
28843 {"softfp", ARM_FLOAT_ABI_SOFTFP},
28844 {"soft", ARM_FLOAT_ABI_SOFT},
28845 {NULL, 0}	/* Sentinel.  */
28846 };
28847
28848 #ifdef OBJ_ELF
28849 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
/* Recognized -meabi= values, terminated by a NULL entry.  */
28850 static const struct arm_option_value_table arm_eabis[] =
28851 {
28852 {"gnu", EF_ARM_EABI_UNKNOWN},
28853 {"4", EF_ARM_EABI_VER4},
28854 {"5", EF_ARM_EABI_VER5},
28855 {NULL, 0}	/* Sentinel.  */
28856 };
28857 #endif
28858
/* Long command-line option with a sub-option string, dispatched to a
   per-option parsing callback.  */
28859 struct arm_long_option_table
28860 {
28861 const char * option; /* Substring to match. */
28862 const char * help; /* Help information. */
28863 int (* func) (const char * subopt); /* Function to decode sub-option. */
28864 const char * deprecated; /* If non-null, print this message. */
28865 };
28866
28867 static bfd_boolean
28868 arm_parse_extension (const char *str, const arm_feature_set *opt_set,
28869 arm_feature_set *ext_set,
28870 const struct arm_ext_table *ext_table)
28871 {
28872 /* We insist on extensions being specified in alphabetical order, and with
28873 extensions being added before being removed. We achieve this by having
28874 the global ARM_EXTENSIONS table in alphabetical order, and using the
28875 ADDING_VALUE variable to indicate whether we are adding an extension (1)
28876 or removing it (0) and only allowing it to change in the order
28877 -1 -> 1 -> 0. */
28878 const struct arm_option_extension_value_table * opt = NULL;
28879 const arm_feature_set arm_any = ARM_ANY;
28880 int adding_value = -1;
28881
28882 while (str != NULL && *str != 0)
28883 {
28884 const char *ext;
28885 size_t len;
28886
28887 if (*str != '+')
28888 {
28889 as_bad (_("invalid architectural extension"));
28890 return FALSE;
28891 }
28892
28893 str++;
28894 ext = strchr (str, '+');
28895
28896 if (ext != NULL)
28897 len = ext - str;
28898 else
28899 len = strlen (str);
28900
28901 if (len >= 2 && strncmp (str, "no", 2) == 0)
28902 {
28903 if (adding_value != 0)
28904 {
28905 adding_value = 0;
28906 opt = arm_extensions;
28907 }
28908
28909 len -= 2;
28910 str += 2;
28911 }
28912 else if (len > 0)
28913 {
28914 if (adding_value == -1)
28915 {
28916 adding_value = 1;
28917 opt = arm_extensions;
28918 }
28919 else if (adding_value != 1)
28920 {
28921 as_bad (_("must specify extensions to add before specifying "
28922 "those to remove"));
28923 return FALSE;
28924 }
28925 }
28926
28927 if (len == 0)
28928 {
28929 as_bad (_("missing architectural extension"));
28930 return FALSE;
28931 }
28932
28933 gas_assert (adding_value != -1);
28934 gas_assert (opt != NULL);
28935
28936 if (ext_table != NULL)
28937 {
28938 const struct arm_ext_table * ext_opt = ext_table;
28939 bfd_boolean found = FALSE;
28940 for (; ext_opt->name != NULL; ext_opt++)
28941 if (ext_opt->name_len == len
28942 && strncmp (ext_opt->name, str, len) == 0)
28943 {
28944 if (adding_value)
28945 {
28946 if (ARM_FEATURE_ZERO (ext_opt->merge))
28947 /* TODO: Option not supported. When we remove the
28948 legacy table this case should error out. */
28949 continue;
28950
28951 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, ext_opt->merge);
28952 }
28953 else
28954 {
28955 if (ARM_FEATURE_ZERO (ext_opt->clear))
28956 /* TODO: Option not supported. When we remove the
28957 legacy table this case should error out. */
28958 continue;
28959 ARM_CLEAR_FEATURE (*ext_set, *ext_set, ext_opt->clear);
28960 }
28961 found = TRUE;
28962 break;
28963 }
28964 if (found)
28965 {
28966 str = ext;
28967 continue;
28968 }
28969 }
28970
28971 /* Scan over the options table trying to find an exact match. */
28972 for (; opt->name != NULL; opt++)
28973 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
28974 {
28975 int i, nb_allowed_archs =
28976 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
28977 /* Check we can apply the extension to this architecture. */
28978 for (i = 0; i < nb_allowed_archs; i++)
28979 {
28980 /* Empty entry. */
28981 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
28982 continue;
28983 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
28984 break;
28985 }
28986 if (i == nb_allowed_archs)
28987 {
28988 as_bad (_("extension does not apply to the base architecture"));
28989 return FALSE;
28990 }
28991
28992 /* Add or remove the extension. */
28993 if (adding_value)
28994 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
28995 else
28996 ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);
28997
28998 /* Allowing Thumb division instructions for ARMv7 in autodetection
28999 rely on this break so that duplicate extensions (extensions
29000 with the same name as a previous extension in the list) are not
29001 considered for command-line parsing. */
29002 break;
29003 }
29004
29005 if (opt->name == NULL)
29006 {
29007 /* Did we fail to find an extension because it wasn't specified in
29008 alphabetical order, or because it does not exist? */
29009
29010 for (opt = arm_extensions; opt->name != NULL; opt++)
29011 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
29012 break;
29013
29014 if (opt->name == NULL)
29015 as_bad (_("unknown architectural extension `%s'"), str);
29016 else
29017 as_bad (_("architectural extensions must be specified in "
29018 "alphabetical order"));
29019
29020 return FALSE;
29021 }
29022 else
29023 {
29024 /* We should skip the extension we've just matched the next time
29025 round. */
29026 opt++;
29027 }
29028
29029 str = ext;
29030 };
29031
29032 return TRUE;
29033 }
29034
29035 static bfd_boolean
29036 arm_parse_cpu (const char *str)
29037 {
29038 const struct arm_cpu_option_table *opt;
29039 const char *ext = strchr (str, '+');
29040 size_t len;
29041
29042 if (ext != NULL)
29043 len = ext - str;
29044 else
29045 len = strlen (str);
29046
29047 if (len == 0)
29048 {
29049 as_bad (_("missing cpu name `%s'"), str);
29050 return FALSE;
29051 }
29052
29053 for (opt = arm_cpus; opt->name != NULL; opt++)
29054 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
29055 {
29056 mcpu_cpu_opt = &opt->value;
29057 if (mcpu_ext_opt == NULL)
29058 mcpu_ext_opt = XNEW (arm_feature_set);
29059 *mcpu_ext_opt = opt->ext;
29060 mcpu_fpu_opt = &opt->default_fpu;
29061 if (opt->canonical_name)
29062 {
29063 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
29064 strcpy (selected_cpu_name, opt->canonical_name);
29065 }
29066 else
29067 {
29068 size_t i;
29069
29070 if (len >= sizeof selected_cpu_name)
29071 len = (sizeof selected_cpu_name) - 1;
29072
29073 for (i = 0; i < len; i++)
29074 selected_cpu_name[i] = TOUPPER (opt->name[i]);
29075 selected_cpu_name[i] = 0;
29076 }
29077
29078 if (ext != NULL)
29079 return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt, NULL);
29080
29081 return TRUE;
29082 }
29083
29084 as_bad (_("unknown cpu `%s'"), str);
29085 return FALSE;
29086 }
29087
29088 static bfd_boolean
29089 arm_parse_arch (const char *str)
29090 {
29091 const struct arm_arch_option_table *opt;
29092 const char *ext = strchr (str, '+');
29093 size_t len;
29094
29095 if (ext != NULL)
29096 len = ext - str;
29097 else
29098 len = strlen (str);
29099
29100 if (len == 0)
29101 {
29102 as_bad (_("missing architecture name `%s'"), str);
29103 return FALSE;
29104 }
29105
29106 for (opt = arm_archs; opt->name != NULL; opt++)
29107 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
29108 {
29109 march_cpu_opt = &opt->value;
29110 if (march_ext_opt == NULL)
29111 march_ext_opt = XNEW (arm_feature_set);
29112 *march_ext_opt = arm_arch_none;
29113 march_fpu_opt = &opt->default_fpu;
29114 strcpy (selected_cpu_name, opt->name);
29115
29116 if (ext != NULL)
29117 return arm_parse_extension (ext, march_cpu_opt, march_ext_opt,
29118 opt->ext_table);
29119
29120 return TRUE;
29121 }
29122
29123 as_bad (_("unknown architecture `%s'\n"), str);
29124 return FALSE;
29125 }
29126
29127 static bfd_boolean
29128 arm_parse_fpu (const char * str)
29129 {
29130 const struct arm_option_fpu_value_table * opt;
29131
29132 for (opt = arm_fpus; opt->name != NULL; opt++)
29133 if (streq (opt->name, str))
29134 {
29135 mfpu_opt = &opt->value;
29136 return TRUE;
29137 }
29138
29139 as_bad (_("unknown floating point format `%s'\n"), str);
29140 return FALSE;
29141 }
29142
29143 static bfd_boolean
29144 arm_parse_float_abi (const char * str)
29145 {
29146 const struct arm_option_value_table * opt;
29147
29148 for (opt = arm_float_abis; opt->name != NULL; opt++)
29149 if (streq (opt->name, str))
29150 {
29151 mfloat_abi_opt = opt->value;
29152 return TRUE;
29153 }
29154
29155 as_bad (_("unknown floating point abi `%s'\n"), str);
29156 return FALSE;
29157 }
29158
29159 #ifdef OBJ_ELF
29160 static bfd_boolean
29161 arm_parse_eabi (const char * str)
29162 {
29163 const struct arm_option_value_table *opt;
29164
29165 for (opt = arm_eabis; opt->name != NULL; opt++)
29166 if (streq (opt->name, str))
29167 {
29168 meabi_flags = opt->value;
29169 return TRUE;
29170 }
29171 as_bad (_("unknown EABI `%s'\n"), str);
29172 return FALSE;
29173 }
29174 #endif
29175
29176 static bfd_boolean
29177 arm_parse_it_mode (const char * str)
29178 {
29179 bfd_boolean ret = TRUE;
29180
29181 if (streq ("arm", str))
29182 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
29183 else if (streq ("thumb", str))
29184 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
29185 else if (streq ("always", str))
29186 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
29187 else if (streq ("never", str))
29188 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
29189 else
29190 {
29191 as_bad (_("unknown implicit IT mode `%s', should be "\
29192 "arm, thumb, always, or never."), str);
29193 ret = FALSE;
29194 }
29195
29196 return ret;
29197 }
29198
29199 static bfd_boolean
29200 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
29201 {
29202 codecomposer_syntax = TRUE;
29203 arm_comment_chars[0] = ';';
29204 arm_line_separator_chars[0] = 0;
29205 return TRUE;
29206 }
29207
/* Table of long (multi-character) command-line options.  Each entry
   gives the option's spelling (including its trailing '='), a help
   string, the parser to call on the option's argument, and a
   deprecation message (NULL when not deprecated).  Scanned by
   md_parse_option and md_show_usage below.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}
};
29228
/* Handle a command-line option.  C is the option character (or long
   option code) and ARG its argument, if any.  Returns 1 when the
   option was recognized and handled, 0 otherwise.  Unknown single
   characters fall through to three option tables, tried in order:
   arm_opts, arm_legacy_opts, then arm_long_opts.  */

int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

#ifdef OBJ_ELF
    case OPTION_FDPIC:
      arm_fdpic = TRUE;
      break;
#endif /* OBJ_ELF */

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Exact-match options: the option character plus the whole of ARG
	 must equal the table entry.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options, matched the same way but storing a pointer to
	 the table's value rather than the value itself.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      /* Long options are matched by prefix only; the remainder of ARG
	 is handed to the entry's parser function.  */
      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  ARG lacks the leading
		 option character, hence the "- 1" in the offset.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
29325
/* Print the ARM-specific command-line options (from arm_opts and
   arm_long_opts, plus the conditionally-compiled flags) to FP for
   "as --help".  */

void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));

#ifdef OBJ_ELF
  fprintf (fp, _("\
  --fdpic                 generate an FDPIC object file\n"));
#endif /* OBJ_ELF */
}
29360
29361 #ifdef OBJ_ELF
29362
/* Associates an EABI Tag_CPU_arch value (VAL) with the feature set
   (FLAGS) of the corresponding architecture.  */
typedef struct
{
  int val;
  arm_feature_set flags;
} cpu_arch_ver_table;

/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   chronologically for architectures, with an exception for ARMv6-M and
   ARMv6S-M due to legacy reasons.  No new architecture should have a
   special case.  This allows for build attribute selection results to be
   stable when new architectures are added.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {TAG_CPU_ARCH_PRE_V4,	  ARM_ARCH_V1},
    {TAG_CPU_ARCH_PRE_V4,	  ARM_ARCH_V2},
    {TAG_CPU_ARCH_PRE_V4,	  ARM_ARCH_V2S},
    {TAG_CPU_ARCH_PRE_V4,	  ARM_ARCH_V3},
    {TAG_CPU_ARCH_PRE_V4,	  ARM_ARCH_V3M},
    {TAG_CPU_ARCH_V4,		  ARM_ARCH_V4xM},
    {TAG_CPU_ARCH_V4,		  ARM_ARCH_V4},
    {TAG_CPU_ARCH_V4T,		  ARM_ARCH_V4TxM},
    {TAG_CPU_ARCH_V4T,		  ARM_ARCH_V4T},
    {TAG_CPU_ARCH_V5T,		  ARM_ARCH_V5xM},
    {TAG_CPU_ARCH_V5T,		  ARM_ARCH_V5},
    {TAG_CPU_ARCH_V5T,		  ARM_ARCH_V5TxM},
    {TAG_CPU_ARCH_V5T,		  ARM_ARCH_V5T},
    {TAG_CPU_ARCH_V5TE,		  ARM_ARCH_V5TExP},
    {TAG_CPU_ARCH_V5TE,		  ARM_ARCH_V5TE},
    {TAG_CPU_ARCH_V5TEJ,	  ARM_ARCH_V5TEJ},
    {TAG_CPU_ARCH_V6,		  ARM_ARCH_V6},
    {TAG_CPU_ARCH_V6KZ,		  ARM_ARCH_V6Z},
    {TAG_CPU_ARCH_V6KZ,		  ARM_ARCH_V6KZ},
    {TAG_CPU_ARCH_V6K,		  ARM_ARCH_V6K},
    {TAG_CPU_ARCH_V6T2,		  ARM_ARCH_V6T2},
    {TAG_CPU_ARCH_V6T2,		  ARM_ARCH_V6KT2},
    {TAG_CPU_ARCH_V6T2,		  ARM_ARCH_V6ZT2},
    {TAG_CPU_ARCH_V6T2,		  ARM_ARCH_V6KZT2},

    /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
       always selected build attributes to match those of ARMv6-M
       (resp. ARMv6S-M).  However, due to these architectures being a strict
       subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
       would be selected when fully respecting chronology of architectures.
       It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
       move them before ARMv7 architectures.  */
    {TAG_CPU_ARCH_V6_M,		  ARM_ARCH_V6M},
    {TAG_CPU_ARCH_V6S_M,	  ARM_ARCH_V6SM},

    {TAG_CPU_ARCH_V7,		  ARM_ARCH_V7},
    {TAG_CPU_ARCH_V7,		  ARM_ARCH_V7A},
    {TAG_CPU_ARCH_V7,		  ARM_ARCH_V7R},
    {TAG_CPU_ARCH_V7,		  ARM_ARCH_V7M},
    {TAG_CPU_ARCH_V7,		  ARM_ARCH_V7VE},
    {TAG_CPU_ARCH_V7E_M,	  ARM_ARCH_V7EM},
    {TAG_CPU_ARCH_V8,		  ARM_ARCH_V8A},
    {TAG_CPU_ARCH_V8,		  ARM_ARCH_V8_1A},
    {TAG_CPU_ARCH_V8,		  ARM_ARCH_V8_2A},
    {TAG_CPU_ARCH_V8,		  ARM_ARCH_V8_3A},
    {TAG_CPU_ARCH_V8M_BASE,	  ARM_ARCH_V8M_BASE},
    {TAG_CPU_ARCH_V8M_MAIN,	  ARM_ARCH_V8M_MAIN},
    {TAG_CPU_ARCH_V8R,		  ARM_ARCH_V8R},
    {TAG_CPU_ARCH_V8,		  ARM_ARCH_V8_4A},
    {TAG_CPU_ARCH_V8,		  ARM_ARCH_V8_5A},
    {TAG_CPU_ARCH_V8_1M_MAIN,	  ARM_ARCH_V8_1M_MAIN},
    {-1,			  ARM_ARCH_NONE}
};
29429
29430 /* Set an attribute if it has not already been set by the user. */
29431
29432 static void
29433 aeabi_set_attribute_int (int tag, int value)
29434 {
29435 if (tag < 1
29436 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
29437 || !attributes_set_explicitly[tag])
29438 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
29439 }
29440
29441 static void
29442 aeabi_set_attribute_string (int tag, const char *value)
29443 {
29444 if (tag < 1
29445 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
29446 || !attributes_set_explicitly[tag])
29447 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
29448 }
29449
/* Return whether features in the *NEEDED feature set are available via
   extensions for the architecture whose feature set is *ARCH_FSET.  */

static bfd_boolean
have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
			    const arm_feature_set *needed)
{
  int i, nb_allowed_archs;
  arm_feature_set ext_fset;
  const struct arm_option_extension_value_table *opt;

  /* Accumulate in EXT_FSET every feature that some applicable extension
     could contribute towards *NEEDED.  */
  ext_fset = arm_arch_none;
  for (opt = arm_extensions; opt->name != NULL; opt++)
    {
      /* Extension does not provide any feature we need.  */
      if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
	continue;

      nb_allowed_archs =
	sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
      for (i = 0; i < nb_allowed_archs; i++)
	{
	  /* Empty entry.  */
	  /* NOTE(review): this loop stops at an any-architecture entry,
	     whereas other allowed_archs scans in this file skip it with
	     `continue' — presumably such an entry terminates the list
	     here; confirm against the arm_extensions table.  */
	  if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
	    break;

	  /* Extension is available, add it.  */
	  if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
	    ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
	}
    }

  /* Can we enable all features in *needed?  */
  return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
}
29485
/* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
   a given architecture feature set *ARCH_EXT_FSET including extension feature
   set *EXT_FSET.  Selection logic used depend on EXACT_MATCH:
   - if true, check for an exact match of the architecture modulo extensions;
   - otherwise, select build attribute value of the first superset
     architecture released so that results remains stable when new architectures
     are added.
   For -march/-mcpu=all the build attribute value of the most featureful
   architecture is returned.  Tag_CPU_arch_profile result is returned in
   PROFILE.  Returns the Tag_CPU_arch value, or -1 when no architecture
   covers the requested features.  */

static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
			      const arm_feature_set *ext_fset,
			      char *profile, int exact_match)
{
  arm_feature_set arch_fset;
  const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;

  /* Select most featureful architecture with all its extensions if building
     for -march=all as the feature sets used to set build attributes.  */
  if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
    {
      /* Force revisiting of decision for each new architecture.  */
      gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8_1M_MAIN);
      *profile = 'A';
      return TAG_CPU_ARCH_V8;
    }

  /* ARCH_FSET is the base architecture without any extension bits.  */
  ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);

  for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
    {
      arm_feature_set known_arch_fset;

      /* Compare CPU feature bits only; FPU bits are tracked separately.  */
      ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
      if (exact_match)
	{
	  /* Base architecture match user-specified architecture and
	     extensions, eg. ARMv6S-M matching -march=armv6-m+os.  */
	  if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
	    {
	      p_ver_ret = p_ver;
	      goto found;
	    }
	  /* Base architecture match user-specified architecture only
	     (eg. ARMv6-M in the same case as above).  Record it in case we
	     find a match with above condition.  */
	  else if (p_ver_ret == NULL
		   && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
	    p_ver_ret = p_ver;
	}
      else
	{

	  /* Architecture has all features wanted.  */
	  if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
	    {
	      arm_feature_set added_fset;

	      /* Compute features added by this architecture over the one
		 recorded in p_ver_ret.  */
	      if (p_ver_ret != NULL)
		ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
				   p_ver_ret->flags);
	      /* First architecture that match incl. with extensions, or the
		 only difference in features over the recorded match is
		 features that were optional and are now mandatory.  */
	      if (p_ver_ret == NULL
		  || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
		{
		  p_ver_ret = p_ver;
		  goto found;
		}
	    }
	  else if (p_ver_ret == NULL)
	    {
	      arm_feature_set needed_ext_fset;

	      ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);

	      /* Architecture has all features needed when using some
		 extensions.  Record it and continue searching in case there
		 exist an architecture providing all needed features without
		 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
		 OS extension).  */
	      if (have_ext_for_needed_feat_p (&known_arch_fset,
					      &needed_ext_fset))
		p_ver_ret = p_ver;
	    }
	}
    }

  if (p_ver_ret == NULL)
    return -1;

found:
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only)))
    *profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r))
    *profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
    *profile = 'M';
  else
    *profile = '\0';
  return p_ver_ret->val;
}
29597
/* Set the public EABI object attributes (the .ARM.attributes section).
   Derives the architecture either from the instructions actually used
   (autodetection) or from the user-selected CPU/architecture, then
   emits the individual Tag_* attributes.  */

static void
aeabi_set_public_attributes (void)
{
  char profile = '\0';
  int arch = -1;
  int virt_sec = 0;
  int fp16_optional = 0;
  int skip_exact_match = 0;
  arm_feature_set flags, flags_arch, flags_ext;

  /* Autodetection mode, choose the architecture based the instructions
     actually used.  */
  if (no_cpu_selected ())
    {
      ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);

      /* Any ARM-state code implies at least ARMv1 support.  */
      if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

      /* Any Thumb-state code implies at least ARMv4T support.  */
      if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

      /* Code run during relaxation relies on selected_cpu being set.  */
      ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
      flags_ext = arm_arch_none;
      ARM_CLEAR_FEATURE (selected_arch, flags_arch, flags_ext);
      selected_ext = flags_ext;
      selected_cpu = flags;
    }
  /* Otherwise, choose the architecture based on the capabilities of the
     requested cpu.  */
  else
    {
      ARM_MERGE_FEATURE_SETS (flags_arch, selected_arch, selected_ext);
      ARM_CLEAR_FEATURE (flags_arch, flags_arch, fpu_any);
      flags_ext = selected_ext;
      flags = selected_cpu;
    }
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_fpu);

  /* Allow the user to override the reported architecture.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch))
    {
      ARM_CLEAR_FEATURE (flags_arch, selected_object_arch, fpu_any);
      flags_ext = arm_arch_none;
    }
  else
    skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);

  /* When this function is run again after relaxation has happened there is no
     way to determine whether an architecture or CPU was specified by the user:
     - selected_cpu is set above for relaxation to work;
     - march_cpu_opt is not set if only -mcpu or .cpu is used;
     - mcpu_cpu_opt is set to arm_arch_any for autodetection.
     Therefore, if not in -march=all case we first try an exact match and fall
     back to autodetection.  */
  if (!skip_exact_match)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
  if (arch == -1)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
  if (arch == -1)
    as_bad (_("no architecture contains all the instructions used\n"));

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* "armvN" names are reported in upper case without the prefix.  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (ARM_CPU_HAS_FEATURE (selected_ext, arm_ext_dsp))
    aeabi_set_attribute_int (Tag_DSP_extension, 1);

  ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || ARM_FEATURE_ZERO (flags_arch))
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || ARM_FEATURE_ZERO (flags_arch))
    {
      int thumb_isa_use;

      /* 3: M-profile-only without ARMv8; 2: Thumb-2; 1: Thumb-1.  */
      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_MVE_arch: 2 for MVE with FP, 1 for integer-only MVE.  */
  if (ARM_CPU_HAS_FEATURE (flags, mve_fp_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, mve_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 1);

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
29808
29809 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
29810 finished and free extension feature bits which will not be used anymore. */
29811
29812 void
29813 arm_md_post_relax (void)
29814 {
29815 aeabi_set_public_attributes ();
29816 XDELETE (mcpu_ext_opt);
29817 mcpu_ext_opt = NULL;
29818 XDELETE (march_ext_opt);
29819 march_ext_opt = NULL;
29820 }
29821
29822 /* Add the default contents for the .ARM.attributes section. */
29823
29824 void
29825 arm_md_end (void)
29826 {
29827 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
29828 return;
29829
29830 aeabi_set_public_attributes ();
29831 }
29832 #endif /* OBJ_ELF */
29833
/* Parse a .cpu directive.  Looks the argument up in arm_cpus and on a
   match updates selected_arch/selected_ext/selected_cpu, the recorded
   CPU name and cpu_variant.  Reports an error for unknown names.  */

static void
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_cpu_option_table *opt;
  char *name;
  char saved_char;

  /* Isolate the CPU name as a NUL-terminated string; the overwritten
     character is restored before returning.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	selected_arch = opt->value;
	selected_ext = opt->ext;
	ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    /* No canonical name: record the matched name in upper case.  */
	    int i;
	    for (i = 0; opt->name[i]; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);

	    selected_cpu_name[i] = 0;
	  }
	ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);

	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
29876
29877 /* Parse a .arch directive. */
29878
29879 static void
29880 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
29881 {
29882 const struct arm_arch_option_table *opt;
29883 char saved_char;
29884 char *name;
29885
29886 name = input_line_pointer;
29887 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
29888 input_line_pointer++;
29889 saved_char = *input_line_pointer;
29890 *input_line_pointer = 0;
29891
29892 /* Skip the first "all" entry. */
29893 for (opt = arm_archs + 1; opt->name != NULL; opt++)
29894 if (streq (opt->name, name))
29895 {
29896 selected_arch = opt->value;
29897 selected_ext = arm_arch_none;
29898 selected_cpu = selected_arch;
29899 strcpy (selected_cpu_name, opt->name);
29900 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
29901 *input_line_pointer = saved_char;
29902 demand_empty_rest_of_line ();
29903 return;
29904 }
29905
29906 as_bad (_("unknown architecture `%s'\n"), name);
29907 *input_line_pointer = saved_char;
29908 ignore_rest_of_line ();
29909 }
29910
29911 /* Parse a .object_arch directive. */
29912
29913 static void
29914 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
29915 {
29916 const struct arm_arch_option_table *opt;
29917 char saved_char;
29918 char *name;
29919
29920 name = input_line_pointer;
29921 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
29922 input_line_pointer++;
29923 saved_char = *input_line_pointer;
29924 *input_line_pointer = 0;
29925
29926 /* Skip the first "all" entry. */
29927 for (opt = arm_archs + 1; opt->name != NULL; opt++)
29928 if (streq (opt->name, name))
29929 {
29930 selected_object_arch = opt->value;
29931 *input_line_pointer = saved_char;
29932 demand_empty_rest_of_line ();
29933 return;
29934 }
29935
29936 as_bad (_("unknown architecture `%s'\n"), name);
29937 *input_line_pointer = saved_char;
29938 ignore_rest_of_line ();
29939 }
29940
29941 /* Parse a .arch_extension directive. */
29942
29943 static void
29944 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
29945 {
29946 const struct arm_option_extension_value_table *opt;
29947 char saved_char;
29948 char *name;
29949 int adding_value = 1;
29950
29951 name = input_line_pointer;
29952 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
29953 input_line_pointer++;
29954 saved_char = *input_line_pointer;
29955 *input_line_pointer = 0;
29956
29957 if (strlen (name) >= 2
29958 && strncmp (name, "no", 2) == 0)
29959 {
29960 adding_value = 0;
29961 name += 2;
29962 }
29963
29964 for (opt = arm_extensions; opt->name != NULL; opt++)
29965 if (streq (opt->name, name))
29966 {
29967 int i, nb_allowed_archs =
29968 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
29969 for (i = 0; i < nb_allowed_archs; i++)
29970 {
29971 /* Empty entry. */
29972 if (ARM_CPU_IS_ANY (opt->allowed_archs[i]))
29973 continue;
29974 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], selected_arch))
29975 break;
29976 }
29977
29978 if (i == nb_allowed_archs)
29979 {
29980 as_bad (_("architectural extension `%s' is not allowed for the "
29981 "current base architecture"), name);
29982 break;
29983 }
29984
29985 if (adding_value)
29986 ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
29987 opt->merge_value);
29988 else
29989 ARM_CLEAR_FEATURE (selected_ext, selected_ext, opt->clear_value);
29990
29991 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
29992 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
29993 *input_line_pointer = saved_char;
29994 demand_empty_rest_of_line ();
29995 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
29996 on this return so that duplicate extensions (extensions with the
29997 same name as a previous extension in the list) are not considered
29998 for command-line parsing. */
29999 return;
30000 }
30001
30002 if (opt->name == NULL)
30003 as_bad (_("unknown architecture extension `%s'\n"), name);
30004
30005 *input_line_pointer = saved_char;
30006 ignore_rest_of_line ();
30007 }
30008
30009 /* Parse a .fpu directive. */
30010
30011 static void
30012 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
30013 {
30014 const struct arm_option_fpu_value_table *opt;
30015 char saved_char;
30016 char *name;
30017
30018 name = input_line_pointer;
30019 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
30020 input_line_pointer++;
30021 saved_char = *input_line_pointer;
30022 *input_line_pointer = 0;
30023
30024 for (opt = arm_fpus; opt->name != NULL; opt++)
30025 if (streq (opt->name, name))
30026 {
30027 selected_fpu = opt->value;
30028 #ifndef CPU_DEFAULT
30029 if (no_cpu_selected ())
30030 ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
30031 else
30032 #endif
30033 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
30034 *input_line_pointer = saved_char;
30035 demand_empty_rest_of_line ();
30036 return;
30037 }
30038
30039 as_bad (_("unknown floating point format `%s'\n"), name);
30040 *input_line_pointer = saved_char;
30041 ignore_rest_of_line ();
30042 }
30043
30044 /* Copy symbol information. */
30045
30046 void
30047 arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
30048 {
30049 ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
30050 }
30051
30052 #ifdef OBJ_ELF
30053 /* Given a symbolic attribute NAME, return the proper integer value.
30054 Returns -1 if the attribute is not known. */
30055
30056 int
30057 arm_convert_symbolic_attribute (const char *name)
30058 {
30059 static const struct
30060 {
30061 const char * name;
30062 const int tag;
30063 }
30064 attribute_table[] =
30065 {
30066 /* When you modify this table you should
30067 also modify the list in doc/c-arm.texi. */
30068 #define T(tag) {#tag, tag}
30069 T (Tag_CPU_raw_name),
30070 T (Tag_CPU_name),
30071 T (Tag_CPU_arch),
30072 T (Tag_CPU_arch_profile),
30073 T (Tag_ARM_ISA_use),
30074 T (Tag_THUMB_ISA_use),
30075 T (Tag_FP_arch),
30076 T (Tag_VFP_arch),
30077 T (Tag_WMMX_arch),
30078 T (Tag_Advanced_SIMD_arch),
30079 T (Tag_PCS_config),
30080 T (Tag_ABI_PCS_R9_use),
30081 T (Tag_ABI_PCS_RW_data),
30082 T (Tag_ABI_PCS_RO_data),
30083 T (Tag_ABI_PCS_GOT_use),
30084 T (Tag_ABI_PCS_wchar_t),
30085 T (Tag_ABI_FP_rounding),
30086 T (Tag_ABI_FP_denormal),
30087 T (Tag_ABI_FP_exceptions),
30088 T (Tag_ABI_FP_user_exceptions),
30089 T (Tag_ABI_FP_number_model),
30090 T (Tag_ABI_align_needed),
30091 T (Tag_ABI_align8_needed),
30092 T (Tag_ABI_align_preserved),
30093 T (Tag_ABI_align8_preserved),
30094 T (Tag_ABI_enum_size),
30095 T (Tag_ABI_HardFP_use),
30096 T (Tag_ABI_VFP_args),
30097 T (Tag_ABI_WMMX_args),
30098 T (Tag_ABI_optimization_goals),
30099 T (Tag_ABI_FP_optimization_goals),
30100 T (Tag_compatibility),
30101 T (Tag_CPU_unaligned_access),
30102 T (Tag_FP_HP_extension),
30103 T (Tag_VFP_HP_extension),
30104 T (Tag_ABI_FP_16bit_format),
30105 T (Tag_MPextension_use),
30106 T (Tag_DIV_use),
30107 T (Tag_nodefaults),
30108 T (Tag_also_compatible_with),
30109 T (Tag_conformance),
30110 T (Tag_T2EE_use),
30111 T (Tag_Virtualization_use),
30112 T (Tag_DSP_extension),
30113 T (Tag_MVE_arch),
30114 /* We deliberately do not include Tag_MPextension_use_legacy. */
30115 #undef T
30116 };
30117 unsigned int i;
30118
30119 if (name == NULL)
30120 return -1;
30121
30122 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
30123 if (streq (name, attribute_table[i].name))
30124 return attribute_table[i].tag;
30125
30126 return -1;
30127 }
30128
30129 /* Apply sym value for relocations only in the case that they are for
30130 local symbols in the same segment as the fixup and you have the
30131 respective architectural feature for blx and simple switches. */
30132
30133 int
30134 arm_apply_sym_value (struct fix * fixP, segT this_seg)
30135 {
30136 if (fixP->fx_addsy
30137 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
30138 /* PR 17444: If the local symbol is in a different section then a reloc
30139 will always be generated for it, so applying the symbol value now
30140 will result in a double offset being stored in the relocation. */
30141 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
30142 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
30143 {
30144 switch (fixP->fx_r_type)
30145 {
30146 case BFD_RELOC_ARM_PCREL_BLX:
30147 case BFD_RELOC_THUMB_PCREL_BRANCH23:
30148 if (ARM_IS_FUNC (fixP->fx_addsy))
30149 return 1;
30150 break;
30151
30152 case BFD_RELOC_ARM_PCREL_CALL:
30153 case BFD_RELOC_THUMB_PCREL_BLX:
30154 if (THUMB_IS_FUNC (fixP->fx_addsy))
30155 return 1;
30156 break;
30157
30158 default:
30159 break;
30160 }
30161
30162 }
30163 return 0;
30164 }
30165 #endif /* OBJ_ELF */
This page took 0.994529 seconds and 5 git commands to generate.