[PATCH 5/57][Arm][GAS] Add support for MVE instructions: vmull{b,t}
[deliverable/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2019 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
/* This structure holds the unwinding state while ARM EHABI unwind
   directives (.fnstart .. .fnend) are being assembled.  */

static struct
{
  /* Symbol marking the start of the function being described.  */
  symbolS * proc_start;
  /* Symbol for this function's entry in the unwind table.  */
  symbolS * table_entry;
  /* Explicit personality routine symbol, if one was given.  */
  symbolS * personality_routine;
  /* Index of a predefined personality routine; presumably a sentinel
     value means "none" -- confirm at the directive handlers.  */
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* Number of opcode bytes used / allocated in OPCODES.  */
  int opcode_count;
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;
77
78 /* Whether --fdpic was given. */
79 static int arm_fdpic;
80
81 #endif /* OBJ_ELF */
82
/* Results from operand parsing worker functions.  */

typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  /* Parsing failed and no alternative interpretation of the operand
     should be attempted.  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
91
/* Floating-point ABI variants (as selectable with -mfloat-abi).  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
98
99 /* Types of processor to assemble for. */
100 #ifndef CPU_DEFAULT
101 /* The code that was here used to select a default CPU depending on compiler
102 pre-defines which were only present when doing native builds, thus
103 changing gas' default behaviour depending upon the build host.
104
105 If you have a target that requires a default CPU option then the you
106 should define CPU_DEFAULT here. */
107 #endif
108
109 #ifndef FPU_DEFAULT
110 # ifdef TE_LINUX
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
113 # ifdef OBJ_ELF
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
115 # else
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
118 # endif
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
121 # else
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
124 # endif
125 #endif /* ifndef FPU_DEFAULT */
126
127 #define streq(a, b) (strcmp (a, b) == 0)
128
129 /* Current set of feature bits available (CPU+FPU). Different from
130 selected_cpu + selected_fpu in case of autodetection since the CPU
131 feature bits are then all set. */
132 static arm_feature_set cpu_variant;
133 /* Feature bits used in each execution state. Used to set build attribute
134 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
135 static arm_feature_set arm_arch_used;
136 static arm_feature_set thumb_arch_used;
137
138 /* Flags stored in private area of BFD structure. */
139 static int uses_apcs_26 = FALSE;
140 static int atpcs = FALSE;
141 static int support_interwork = FALSE;
142 static int uses_apcs_float = FALSE;
143 static int pic_code = FALSE;
144 static int fix_v4bx = FALSE;
145 /* Warn on using deprecated features. */
146 static int warn_on_deprecated = TRUE;
147
148 /* Understand CodeComposer Studio assembly syntax. */
149 bfd_boolean codecomposer_syntax = FALSE;
150
151 /* Variables that we set while parsing command-line options. Once all
152 options have been read we re-process these values to set the real
153 assembly flags. */
154
155 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
156 instead of -mcpu=arm1). */
157 static const arm_feature_set *legacy_cpu = NULL;
158 static const arm_feature_set *legacy_fpu = NULL;
159
160 /* CPU, extension and FPU feature bits selected by -mcpu. */
161 static const arm_feature_set *mcpu_cpu_opt = NULL;
162 static arm_feature_set *mcpu_ext_opt = NULL;
163 static const arm_feature_set *mcpu_fpu_opt = NULL;
164
165 /* CPU, extension and FPU feature bits selected by -march. */
166 static const arm_feature_set *march_cpu_opt = NULL;
167 static arm_feature_set *march_ext_opt = NULL;
168 static const arm_feature_set *march_fpu_opt = NULL;
169
170 /* Feature bits selected by -mfpu. */
171 static const arm_feature_set *mfpu_opt = NULL;
172
173 /* Constants for known architecture features. */
174 static const arm_feature_set fpu_default = FPU_DEFAULT;
175 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
176 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
177 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
178 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
179 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
180 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
181 #ifdef OBJ_ELF
182 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
183 #endif
184 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
185
186 #ifdef CPU_DEFAULT
187 static const arm_feature_set cpu_default = CPU_DEFAULT;
188 #endif
189
190 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
191 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
192 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
193 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
194 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
195 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
196 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
197 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
198 static const arm_feature_set arm_ext_v4t_5 =
199 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
200 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
201 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
202 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
203 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
204 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
205 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
206 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
/* Only for compatibility of hint instructions.  */
208 static const arm_feature_set arm_ext_v6k_v6t2 =
209 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V6T2);
210 static const arm_feature_set arm_ext_v6_notm =
211 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
212 static const arm_feature_set arm_ext_v6_dsp =
213 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
214 static const arm_feature_set arm_ext_barrier =
215 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
216 static const arm_feature_set arm_ext_msr =
217 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
218 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
219 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
220 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
221 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
222 #ifdef OBJ_ELF
223 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
224 #endif
225 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
226 static const arm_feature_set arm_ext_m =
227 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
228 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
229 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
230 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
231 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
232 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
233 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
234 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
235 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
236 static const arm_feature_set arm_ext_v8m_main =
237 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
238 static const arm_feature_set arm_ext_v8_1m_main =
239 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
240 /* Instructions in ARMv8-M only found in M profile architectures. */
241 static const arm_feature_set arm_ext_v8m_m_only =
242 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
243 static const arm_feature_set arm_ext_v6t2_v8m =
244 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
245 /* Instructions shared between ARMv8-A and ARMv8-M. */
246 static const arm_feature_set arm_ext_atomics =
247 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
248 #ifdef OBJ_ELF
249 /* DSP instructions Tag_DSP_extension refers to. */
250 static const arm_feature_set arm_ext_dsp =
251 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
252 #endif
253 static const arm_feature_set arm_ext_ras =
254 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
255 /* FP16 instructions. */
256 static const arm_feature_set arm_ext_fp16 =
257 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
258 static const arm_feature_set arm_ext_fp16_fml =
259 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
260 static const arm_feature_set arm_ext_v8_2 =
261 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
262 static const arm_feature_set arm_ext_v8_3 =
263 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
264 static const arm_feature_set arm_ext_sb =
265 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
266 static const arm_feature_set arm_ext_predres =
267 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);
268
269 static const arm_feature_set arm_arch_any = ARM_ANY;
270 #ifdef OBJ_ELF
271 static const arm_feature_set fpu_any = FPU_ANY;
272 #endif
273 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
274 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
275 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
276
277 static const arm_feature_set arm_cext_iwmmxt2 =
278 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
279 static const arm_feature_set arm_cext_iwmmxt =
280 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
281 static const arm_feature_set arm_cext_xscale =
282 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
283 static const arm_feature_set arm_cext_maverick =
284 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
285 static const arm_feature_set fpu_fpa_ext_v1 =
286 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
287 static const arm_feature_set fpu_fpa_ext_v2 =
288 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
289 static const arm_feature_set fpu_vfp_ext_v1xd =
290 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
291 static const arm_feature_set fpu_vfp_ext_v1 =
292 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
293 static const arm_feature_set fpu_vfp_ext_v2 =
294 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
295 static const arm_feature_set fpu_vfp_ext_v3xd =
296 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
297 static const arm_feature_set fpu_vfp_ext_v3 =
298 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
299 static const arm_feature_set fpu_vfp_ext_d32 =
300 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
301 static const arm_feature_set fpu_neon_ext_v1 =
302 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
303 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
304 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
305 static const arm_feature_set mve_ext =
306 ARM_FEATURE_COPROC (FPU_MVE);
307 static const arm_feature_set mve_fp_ext =
308 ARM_FEATURE_COPROC (FPU_MVE_FP);
309 #ifdef OBJ_ELF
310 static const arm_feature_set fpu_vfp_fp16 =
311 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
312 static const arm_feature_set fpu_neon_ext_fma =
313 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
314 #endif
315 static const arm_feature_set fpu_vfp_ext_fma =
316 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
317 static const arm_feature_set fpu_vfp_ext_armv8 =
318 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
319 static const arm_feature_set fpu_vfp_ext_armv8xd =
320 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
321 static const arm_feature_set fpu_neon_ext_armv8 =
322 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
323 static const arm_feature_set fpu_crypto_ext_armv8 =
324 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
325 static const arm_feature_set crc_ext_armv8 =
326 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
327 static const arm_feature_set fpu_neon_ext_v8_1 =
328 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
329 static const arm_feature_set fpu_neon_ext_dotprod =
330 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
331
332 static int mfloat_abi_opt = -1;
333 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
334 directive. */
335 static arm_feature_set selected_arch = ARM_ARCH_NONE;
336 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
337 directive. */
338 static arm_feature_set selected_ext = ARM_ARCH_NONE;
339 /* Feature bits selected by the last -mcpu/-march or by the combination of the
340 last .cpu/.arch directive .arch_extension directives since that
341 directive. */
342 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
343 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
344 static arm_feature_set selected_fpu = FPU_NONE;
345 /* Feature bits selected by the last .object_arch directive. */
346 static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
347 /* Must be long enough to hold any of the names in arm_cpus. */
348 static char selected_cpu_name[20];
349
350 extern FLONUM_TYPE generic_floating_point_number;
351
352 /* Return if no cpu was selected on command-line. */
353 static bfd_boolean
354 no_cpu_selected (void)
355 {
356 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
357 }
358
359 #ifdef OBJ_ELF
360 # ifdef EABI_DEFAULT
361 static int meabi_flags = EABI_DEFAULT;
362 # else
363 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
364 # endif
365
366 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
367
368 bfd_boolean
369 arm_is_eabi (void)
370 {
371 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
372 }
373 #endif
374
375 #ifdef OBJ_ELF
376 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
377 symbolS * GOT_symbol;
378 #endif
379
380 /* 0: assemble for ARM,
381 1: assemble for Thumb,
382 2: assemble for Thumb even though target CPU does not support thumb
383 instructions. */
384 static int thumb_mode = 0;
385 /* A value distinct from the possible values for thumb_mode that we
386 can use to record whether thumb_mode has been copied into the
387 tc_frag_data field of a frag. */
388 #define MODE_RECORDED (1 << 4)
389
390 /* Specifies the intrinsic IT insn behavior mode. */
391 enum implicit_it_mode
392 {
393 IMPLICIT_IT_MODE_NEVER = 0x00,
394 IMPLICIT_IT_MODE_ARM = 0x01,
395 IMPLICIT_IT_MODE_THUMB = 0x02,
396 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
397 };
398 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
399
400 /* If unified_syntax is true, we are processing the new unified
401 ARM/Thumb syntax. Important differences from the old ARM mode:
402
403 - Immediate operands do not require a # prefix.
404 - Conditional affixes always appear at the end of the
405 instruction. (For backward compatibility, those instructions
406 that formerly had them in the middle, continue to accept them
407 there.)
408 - The IT instruction may appear, and if it does is validated
409 against subsequent conditional affixes. It does not generate
410 machine code.
411
412 Important differences from the old Thumb mode:
413
414 - Immediate operands do not require a # prefix.
415 - Most of the V6T2 instructions are only available in unified mode.
416 - The .N and .W suffixes are recognized and honored (it is an error
417 if they cannot be honored).
418 - All instructions set the flags if and only if they have an 's' affix.
419 - Conditional affixes may be used. They are validated against
420 preceding IT instructions. Unlike ARM mode, you cannot use a
421 conditional affix except in the scope of an IT instruction. */
422
423 static bfd_boolean unified_syntax = FALSE;
424
425 /* An immediate operand can start with #, and ld*, st*, pld operands
426 can contain [ and ]. We need to tell APP not to elide whitespace
427 before a [, which can appear as the first operand for pld.
428 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
429 const char arm_symbol_chars[] = "#[]{}";
430
/* Kinds of element type that may appear in a Neon/MVE type suffix
   (e.g. the "s32" in "vadd.s32").  */
enum neon_el_type
{
  NT_invtype,	/* Invalid or not-yet-determined type.  */
  NT_untyped,
  NT_integer,
  NT_float,
  NT_poly,	/* Polynomial type.  */
  NT_signed,
  NT_unsigned
};
441
/* A single parsed type-suffix element: its kind and its element size.  */
struct neon_type_el
{
  enum neon_el_type type;
  unsigned size;
};
447
/* Maximum number of type elements one mnemonic suffix may carry.  */
#define NEON_MAX_TYPE_ELS 4

/* A complete parsed Neon/MVE type suffix.  */
struct neon_type
{
  struct neon_type_el el[NEON_MAX_TYPE_ELS];
  /* Number of valid entries in EL.  */
  unsigned elems;
};
455
/* Classification of an instruction with respect to IT (Thumb) and
   VPT/VPST (MVE) predication blocks.  */
enum pred_instruction_type
{
  OUTSIDE_PRED_INSN,
  INSIDE_VPT_INSN,
  INSIDE_IT_INSN,
  INSIDE_IT_LAST_INSN,
  IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
			     if inside, should be the last one.  */
  NEUTRAL_IT_INSN,        /* This could be either inside or outside,
			     i.e. BKPT and NOP.  */
  IT_INSN,		  /* The IT insn has been parsed.  */
  VPT_INSN,		  /* The VPT/VPST insn has been parsed.  */
  MVE_OUTSIDE_PRED_INSN   /* Instruction to indicate a MVE instruction without
			     a predication code.  */
};
471
/* The maximum number of operands we need.  */
#define ARM_IT_MAX_OPERANDS 6
#define ARM_IT_MAX_RELOCS 3

/* Working state for the instruction currently being assembled: the parsed
   mnemonic, condition, operands and relocations, filled in by the parser
   and consumed by the ARM/Thumb encoders.  */
struct arm_it
{
  /* Diagnostic to report for this instruction, if any -- presumably NULL
     while no error has been detected.  */
  const char * error;
  /* Instruction encoding built up so far.  */
  unsigned long instruction;
  /* Encoded size in bytes, and any size explicitly requested
     (e.g. via a .N/.W suffix).  */
  int size;
  int size_req;
  /* Condition code for the instruction.  */
  int cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int uncond_value;
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  int is_neon;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long relax;
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS exp;
    int pc_rel;
  } relocs[ARM_IT_MAX_RELOCS];

  /* Predication-block classification of this instruction.  */
  enum pred_instruction_type pred_insn_type;

  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present	: 1;  /* Operand present.  */
    unsigned isreg	: 1;  /* Operand was a register.  */
    unsigned immisreg	: 1;  /* .imm field is a second register.  */
    unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad     : 1;  /* Operand is SIMD quad register.  */
    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
    unsigned hasreloc	: 1;  /* Operand has relocation suffix.  */
    unsigned writeback	: 1;  /* Operand has trailing !  */
    unsigned preind	: 1;  /* Preindexed address.  */
    unsigned postind	: 1;  /* Postindexed address.  */
    unsigned negative	: 1;  /* Index register was negated.  */
    unsigned shifted	: 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
  } operands[ARM_IT_MAX_OPERANDS];
};
529
530 static struct arm_it inst;
531
/* Number of FPA floating-point constants below.  */
#define NUM_FLOAT_VALS 8

/* The immediate floating-point constants, as strings; the trailing 0
   terminates the list.  */
const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};

/* Converted (LITTLENUM) forms of the constants in fp_const.  */
LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
540
541 #define FAIL (-1)
542 #define SUCCESS (0)
543
544 #define SUFF_S 1
545 #define SUFF_D 2
546 #define SUFF_E 3
547 #define SUFF_P 4
548
549 #define CP_T_X 0x00008000
550 #define CP_T_Y 0x00400000
551
552 #define CONDS_BIT 0x00100000
553 #define LOAD_BIT 0x00100000
554
555 #define DOUBLE_LOAD_FLAG 0x00000001
556
/* A condition-code name and its encoding value, for the condition
   hash tables.  */
struct asm_cond
{
  const char *	 template_name;
  unsigned long  value;
};
562
563 #define COND_ALWAYS 0xE
564
/* A PSR name (e.g. for MSR/MRS operands) and its field encoding.  */
struct asm_psr
{
  const char *   template_name;
  unsigned long  field;
};
570
/* A barrier option name, its encoding, and the architecture feature set
   that provides it.  */
struct asm_barrier_opt
{
  const char *		  template_name;
  unsigned long		  value;
  const arm_feature_set   arch;
};
577
578 /* The bit that distinguishes CPSR and SPSR. */
579 #define SPSR_BIT (1 << 22)
580
581 /* The individual PSR flag bits. */
582 #define PSR_c (1 << 16)
583 #define PSR_x (1 << 17)
584 #define PSR_s (1 << 18)
585 #define PSR_f (1 << 19)
586
/* A relocation-suffix name and the BFD relocation it selects.  */
struct reloc_entry
{
  const char *		     name;
  bfd_reloc_code_real_type   reloc;
};
592
/* Positions a VFP register field can occupy in an instruction encoding:
   single (S) or double (D) precision, in the d/m/n operand slots.  */
enum vfp_reg_pos
{
  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
};
598
/* Addressing-mode variants of the VFP load/store-multiple instructions
   (increment-after / decrement-before, with and without the X form).  */
enum vfp_ldstm_type
{
  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
};
603
/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2

/* Extra information attached to a register alias created with .dn or .qn:
   an optional element type and/or an optional scalar index.  */
struct neon_typed_alias
{
  /* Mask of NTA_HASTYPE / NTA_HASINDEX saying which fields are valid.  */
  unsigned char        defined;
  unsigned char        index;
  struct neon_type_el  eltype;
};
614
/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  Each entry should have an error message
   in reg_expected_msgs below.  */
enum arm_reg_type
{
  REG_TYPE_RN,		/* ARM core register.  */
  REG_TYPE_CP,		/* Coprocessor number.  */
  REG_TYPE_CN,		/* Coprocessor register.  */
  REG_TYPE_FN,		/* FPA register.  */
  REG_TYPE_VFS,		/* VFP single-precision register.  */
  REG_TYPE_VFD,		/* VFP/Neon double-precision register.  */
  REG_TYPE_NQ,		/* Neon quad-precision register.  */
  REG_TYPE_VFSD,	/* VFP single or double.  */
  REG_TYPE_NDQ,		/* Neon double or quad.  */
  REG_TYPE_NSD,		/* Neon single or double.  */
  REG_TYPE_NSDQ,	/* VFP single, double or Neon quad.  */
  REG_TYPE_VFC,		/* VFP system register.  */
  REG_TYPE_MVF,		/* Maverick register classes.  */
  REG_TYPE_MVD,
  REG_TYPE_MVFX,
  REG_TYPE_MVDX,
  REG_TYPE_MVAX,
  REG_TYPE_MQ,		/* MVE vector register.  */
  REG_TYPE_DSPSC,	/* Maverick DSPSC register.  */
  REG_TYPE_MMXWR,	/* iWMMXt register classes.  */
  REG_TYPE_MMXWC,
  REG_TYPE_MMXWCG,
  REG_TYPE_XSCALE,	/* XScale accumulator register.  */
  REG_TYPE_RNB,		/* Bare register number.  */
};
645
/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn).  Otherwise NEON should be NULL.  */
struct reg_entry
{
  const char *               name;
  unsigned int               number;
  unsigned char              type;	/* An enum arm_reg_type value.  */
  unsigned char              builtin;	/* Nonzero for predefined names
					   (as opposed to user aliases).  */
  struct neon_typed_alias *  neon;
};
658
/* Diagnostics used when we don't get a register of the expected type.
   Indexed by enum arm_reg_type; keep in sync with that enum.  */
const char * const reg_expected_msgs[] =
{
  [REG_TYPE_RN]	    = N_("ARM register expected"),
  [REG_TYPE_CP]	    = N_("bad or missing co-processor number"),
  [REG_TYPE_CN]	    = N_("co-processor register expected"),
  [REG_TYPE_FN]	    = N_("FPA register expected"),
  [REG_TYPE_VFS]    = N_("VFP single precision register expected"),
  [REG_TYPE_VFD]    = N_("VFP/Neon double precision register expected"),
  [REG_TYPE_NQ]	    = N_("Neon quad precision register expected"),
  [REG_TYPE_VFSD]   = N_("VFP single or double precision register expected"),
  [REG_TYPE_NDQ]    = N_("Neon double or quad precision register expected"),
  [REG_TYPE_NSD]    = N_("Neon single or double precision register expected"),
  [REG_TYPE_NSDQ]   = N_("VFP single, double or Neon quad precision register"
			 " expected"),
  [REG_TYPE_VFC]    = N_("VFP system register expected"),
  [REG_TYPE_MVF]    = N_("Maverick MVF register expected"),
  [REG_TYPE_MVD]    = N_("Maverick MVD register expected"),
  [REG_TYPE_MVFX]   = N_("Maverick MVFX register expected"),
  [REG_TYPE_MVDX]   = N_("Maverick MVDX register expected"),
  [REG_TYPE_MVAX]   = N_("Maverick MVAX register expected"),
  [REG_TYPE_DSPSC]  = N_("Maverick DSPSC register expected"),
  [REG_TYPE_MMXWR]  = N_("iWMMXt data register expected"),
  [REG_TYPE_MMXWC]  = N_("iWMMXt control register expected"),
  [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
  [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
  [REG_TYPE_MQ]	    = N_("MVE vector register expected"),
  /* RNB entries are matched by number; no diagnostic is needed.  */
  [REG_TYPE_RNB]    = N_("")
};
688
689 /* Some well known registers that we refer to directly elsewhere. */
690 #define REG_R12 12
691 #define REG_SP 13
692 #define REG_LR 14
693 #define REG_PC 15
694
695 /* ARM instructions take 4bytes in the object file, Thumb instructions
696 take 2: */
697 #define INSN_SIZE 4
698
/* One entry in the assembler's opcode table: the mnemonic, its operand
   list, its ARM and Thumb encodings, the architecture variants that
   provide it, and the encoder functions for each instruction set.  */
struct asm_opcode
{
  /* Basic string to match.  */
  const char * template_name;

  /* Parameters to instruction.  */
  unsigned int operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue;

  /* Thumb-format instruction code.  */
  unsigned int tvalue;

  /* Which architecture variant provides this instruction.  */
  const arm_feature_set * avariant;
  const arm_feature_set * tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);

  /* Indicates whether this instruction may be vector predicated.  */
  unsigned int mayBeVecPred : 1;
};
729
730 /* Defines for various bits that we will want to toggle. */
731 #define INST_IMMEDIATE 0x02000000
732 #define OFFSET_REG 0x02000000
733 #define HWOFFSET_IMM 0x00400000
734 #define SHIFT_BY_REG 0x00000010
735 #define PRE_INDEX 0x01000000
736 #define INDEX_UP 0x00800000
737 #define WRITE_BACK 0x00200000
738 #define LDM_TYPE_2_OR_3 0x00400000
739 #define CPSI_MMOD 0x00020000
740
741 #define LITERAL_MASK 0xf000f000
742 #define OPCODE_MASK 0xfe1fffff
743 #define V4_STR_BIT 0x00000020
744 #define VLDR_VMOV_SAME 0x0040f000
745
746 #define T2_SUBS_PC_LR 0xf3de8f00
747
748 #define DATA_OP_SHIFT 21
749 #define SBIT_SHIFT 20
750
751 #define T2_OPCODE_MASK 0xfe1fffff
752 #define T2_DATA_OP_SHIFT 21
753 #define T2_SBIT_SHIFT 20
754
755 #define A_COND_MASK 0xf0000000
756 #define A_PUSH_POP_OP_MASK 0x0fff0000
757
/* Opcodes for pushing/popping registers to/from the stack.  */
759 #define A1_OPCODE_PUSH 0x092d0000
760 #define A2_OPCODE_PUSH 0x052d0004
761 #define A2_OPCODE_POP 0x049d0004
762
763 /* Codes to distinguish the arithmetic instructions. */
764 #define OPCODE_AND 0
765 #define OPCODE_EOR 1
766 #define OPCODE_SUB 2
767 #define OPCODE_RSB 3
768 #define OPCODE_ADD 4
769 #define OPCODE_ADC 5
770 #define OPCODE_SBC 6
771 #define OPCODE_RSC 7
772 #define OPCODE_TST 8
773 #define OPCODE_TEQ 9
774 #define OPCODE_CMP 10
775 #define OPCODE_CMN 11
776 #define OPCODE_ORR 12
777 #define OPCODE_MOV 13
778 #define OPCODE_BIC 14
779 #define OPCODE_MVN 15
780
781 #define T2_OPCODE_AND 0
782 #define T2_OPCODE_BIC 1
783 #define T2_OPCODE_ORR 2
784 #define T2_OPCODE_ORN 3
785 #define T2_OPCODE_EOR 4
786 #define T2_OPCODE_ADD 8
787 #define T2_OPCODE_ADC 10
788 #define T2_OPCODE_SBC 11
789 #define T2_OPCODE_SUB 13
790 #define T2_OPCODE_RSB 14
791
792 #define T_OPCODE_MUL 0x4340
793 #define T_OPCODE_TST 0x4200
794 #define T_OPCODE_CMN 0x42c0
795 #define T_OPCODE_NEG 0x4240
796 #define T_OPCODE_MVN 0x43c0
797
798 #define T_OPCODE_ADD_R3 0x1800
799 #define T_OPCODE_SUB_R3 0x1a00
800 #define T_OPCODE_ADD_HI 0x4400
801 #define T_OPCODE_ADD_ST 0xb000
802 #define T_OPCODE_SUB_ST 0xb080
803 #define T_OPCODE_ADD_SP 0xa800
804 #define T_OPCODE_ADD_PC 0xa000
805 #define T_OPCODE_ADD_I8 0x3000
806 #define T_OPCODE_SUB_I8 0x3800
807 #define T_OPCODE_ADD_I3 0x1c00
808 #define T_OPCODE_SUB_I3 0x1e00
809
810 #define T_OPCODE_ASR_R 0x4100
811 #define T_OPCODE_LSL_R 0x4080
812 #define T_OPCODE_LSR_R 0x40c0
813 #define T_OPCODE_ROR_R 0x41c0
814 #define T_OPCODE_ASR_I 0x1000
815 #define T_OPCODE_LSL_I 0x0000
816 #define T_OPCODE_LSR_I 0x0800
817
818 #define T_OPCODE_MOV_I8 0x2000
819 #define T_OPCODE_CMP_I8 0x2800
820 #define T_OPCODE_CMP_LR 0x4280
821 #define T_OPCODE_MOV_HR 0x4600
822 #define T_OPCODE_CMP_HR 0x4500
823
824 #define T_OPCODE_LDR_PC 0x4800
825 #define T_OPCODE_LDR_SP 0x9800
826 #define T_OPCODE_STR_SP 0x9000
827 #define T_OPCODE_LDR_IW 0x6800
828 #define T_OPCODE_STR_IW 0x6000
829 #define T_OPCODE_LDR_IH 0x8800
830 #define T_OPCODE_STR_IH 0x8000
831 #define T_OPCODE_LDR_IB 0x7800
832 #define T_OPCODE_STR_IB 0x7000
833 #define T_OPCODE_LDR_RW 0x5800
834 #define T_OPCODE_STR_RW 0x5000
835 #define T_OPCODE_LDR_RH 0x5a00
836 #define T_OPCODE_STR_RH 0x5200
837 #define T_OPCODE_LDR_RB 0x5c00
838 #define T_OPCODE_STR_RB 0x5400
839
840 #define T_OPCODE_PUSH 0xb400
841 #define T_OPCODE_POP 0xbc00
842
843 #define T_OPCODE_BRANCH 0xe000
844
845 #define THUMB_SIZE 2 /* Size of thumb instruction. */
846 #define THUMB_PP_PC_LR 0x0100
847 #define THUMB_LOAD_BIT 0x0800
848 #define THUMB2_LOAD_BIT 0x00100000
849
/* Diagnostic message strings used throughout the ARM back end.  Each
   expands to a translated string (gettext), suitable for assignment to
   inst.error or for passing to first_error ().  */
#define BAD_SYNTAX _("syntax error")
#define BAD_ARGS _("bad arguments to instruction")
#define BAD_SP _("r13 not allowed here")
#define BAD_PC _("r15 not allowed here")
#define BAD_ODD _("Odd register not allowed here")
#define BAD_EVEN _("Even register not allowed here")
#define BAD_COND _("instruction cannot be conditional")
#define BAD_OVERLAP _("registers may not be the same")
#define BAD_HIREG _("lo register required")
#define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
/* Fix: the previous definition ended in a stray semicolon, which made
   the macro expand to a statement and broke any expression-context use
   (e.g. `cond ? BAD_ADDR_MODE : other').  */
#define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
#define BAD_BRANCH _("branch must be last instruction in IT block")
#define BAD_BRANCH_OFF _("branch out of range or not a multiple of 2")
#define BAD_NOT_IT _("instruction not allowed in IT block")
#define BAD_NOT_VPT _("instruction missing MVE vector predication code")
#define BAD_FPU _("selected FPU does not support instruction")
#define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
#define BAD_OUT_VPT \
	_("vector predicated instruction should be in VPT/VPST block")
#define BAD_IT_COND _("incorrect condition in IT block")
#define BAD_VPT_COND _("incorrect condition in VPT/VPST block")
#define BAD_IT_IT _("IT falling in the range of a previous IT block")
#define MISSING_FNSTART _("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE _("branch out of range")
#define BAD_FP16 _("selected processor does not support fp16 instruction")
#define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
#define MVE_NOT_IT _("Warning: instruction is UNPREDICTABLE in an IT " \
		     "block")
#define MVE_NOT_VPT _("Warning: instruction is UNPREDICTABLE in a VPT " \
		      "block")
#define MVE_BAD_PC _("Warning: instruction is UNPREDICTABLE with PC" \
		     " operand")
#define MVE_BAD_SP _("Warning: instruction is UNPREDICTABLE with SP" \
		     " operand")
#define BAD_SIMD_TYPE _("bad type in SIMD instruction")
#define BAD_MVE_AUTO \
  _("GAS auto-detection mode and -march=all is deprecated for MVE, please" \
    " use a valid -march or -mcpu option.")
#define BAD_MVE_SRCDEST _("Warning: 32-bit element size and same destination "\
			  "and source operands makes instruction UNPREDICTABLE")
895
/* Lookup tables used by the parser, populated during initialisation
   (outside this chunk).  Only arm_reg_hsh is referenced in this part of
   the file (by arm_reg_parse_multi); the contents of the others are
   suggested by their names.  */
static struct hash_control * arm_ops_hsh;
static struct hash_control * arm_cond_hsh;
static struct hash_control * arm_vcond_hsh;
static struct hash_control * arm_shift_hsh;
static struct hash_control * arm_psr_hsh;
static struct hash_control * arm_v7m_psr_hsh;
static struct hash_control * arm_reg_hsh;
static struct hash_control * arm_reloc_hsh;
static struct hash_control * arm_barrier_opt_hsh;

/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
	     <insn>  */

symbolS *  last_label_seen;
/* NOTE(review): inferred from the name -- set when the last label seen
   names a Thumb function; consumers are outside this chunk.  */
static int label_is_thumb_function_name = FALSE;
917
/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  /* Pending pool entries and the index of the first free slot.  */
  expressionS	 literals [MAX_LITERAL_POOL_SIZE];
  unsigned int	 next_free_entry;
  /* Pool identifier and the symbol marking where it will be emitted.  */
  unsigned int	 id;
  symbolS *	 symbol;
  /* Owning section/subsection -- pools are kept per-subsection.  */
  segT		 section;
  subsegT	 sub_section;
#ifdef OBJ_ELF
  /* Source location of each entry, for DWARF2 line information.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  struct literal_pool * next;	/* Next pool in list_of_pools.  */
  /* Required alignment -- NOTE(review): units (bytes vs log2) are not
     visible in this chunk; confirm against the pool emitter.  */
  unsigned int	 alignment;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;

/* State machine for the .asmfunc/.endasmfunc directive pair.  */
typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,
  WAITING_ASMFUNC_NAME,
  WAITING_ENDASMFUNC
} asmfunc_states;

static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
948
#ifdef OBJ_ELF
/* For ELF the IT/VPT prediction-block state is kept per section, so
   assembly may switch sections mid-block; otherwise a single
   file-scope state is enough.  */
# define now_pred seg_info (now_seg)->tc_segment_info_data.current_pred
#else
static struct current_pred now_pred;
#endif

/* Return non-zero when COND matches the condition of the current
   prediction block, ignoring the low bit (which distinguishes a
   condition from its negation).  */
static inline int
now_pred_compatible (int cond)
{
  return (cond & ~1) == (now_pred.cc & ~1);
}
960
961 static inline int
962 conditional_insn (void)
963 {
964 return inst.cond != COND_ALWAYS;
965 }
966
/* Forward declarations for the IT/VPT prediction-block state machine;
   the definitions appear later in this file.  */
static int in_pred_block (void);

static int handle_pred_state (void);

static void force_automatic_it_block_close (void);

static void it_fsm_post_encode (void);

/* Record TYPE as the prediction kind of the instruction being encoded
   and run the state machine.  Deliberately a macro, not a function: on
   failure it executes `return' in the *calling* encoder.  */
#define set_pred_insn_type(type)			\
  do							\
    {							\
      inst.pred_insn_type = type;			\
      if (handle_pred_state () == FAIL)			\
	return;						\
    }							\
  while (0)

/* As set_pred_insn_type, for callers that must return FAILRET rather
   than plain `return' on failure.  */
#define set_pred_insn_type_nonvoid(type, failret)	\
  do							\
    {							\
      inst.pred_insn_type = type;			\
      if (handle_pred_state () == FAIL)			\
	return failret;					\
    }							\
  while(0)

/* Mark the current instruction as the last one of its IT block.  An
   unconditional instruction gets IF_INSIDE_IT_LAST_INSN -- presumably
   meaning it may also appear outside a block (the handler is later in
   the file); a conditional one must be inside a block.  */
#define set_pred_insn_type_last()			\
  do							\
    {							\
      if (inst.cond == COND_ALWAYS)			\
	set_pred_insn_type (IF_INSIDE_IT_LAST_INSN);	\
      else						\
	set_pred_insn_type (INSIDE_IT_LAST_INSN);	\
    }							\
  while (0)
1002
/* Pure syntax.	 */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.	 */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.	*/
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.	*/
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or	 0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

/* Advance STR past at most ONE blank -- it deliberately does not skip
   runs of whitespace or tabs.  */
#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
1037
1038 static inline int
1039 skip_past_char (char ** str, char c)
1040 {
1041 /* PR gas/14987: Allow for whitespace before the expected character. */
1042 skip_whitespace (*str);
1043
1044 if (**str == c)
1045 {
1046 (*str)++;
1047 return SUCCESS;
1048 }
1049 else
1050 return FAIL;
1051 }
1052
1053 #define skip_past_comma(str) skip_past_char (str, ',')
1054
1055 /* Arithmetic expressions (possibly involving symbols). */
1056
1057 /* Return TRUE if anything in the expression is a bignum. */
1058
1059 static bfd_boolean
1060 walk_no_bignums (symbolS * sp)
1061 {
1062 if (symbol_get_value_expression (sp)->X_op == O_big)
1063 return TRUE;
1064
1065 if (symbol_get_value_expression (sp)->X_add_symbol)
1066 {
1067 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
1068 || (symbol_get_value_expression (sp)->X_op_symbol
1069 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
1070 }
1071
1072 return FALSE;
1073 }
1074
/* Non-zero while expression () is running on behalf of
   my_get_expression; tells md_operand (below) to flag bad operands as
   O_illegal instead of the generic handling.  */
static bfd_boolean in_my_get_expression = FALSE;

/* Third argument to my_get_expression.	 */
#define GE_NO_PREFIX  0		/* No '#'/'$' prefix is consumed.  */
#define GE_IMM_PREFIX 1		/* A '#'/'$' prefix is mandatory.  */
#define GE_OPT_PREFIX 2		/* A '#'/'$' prefix is optional.  */
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
1084
/* Parse an expression at *STR into EP according to PREFIX_MODE (one of
   the GE_* values above), advancing *STR past what was consumed.
   Returns SUCCESS when a valid expression was parsed; on failure sets
   inst.error (if not already set) and returns a non-SUCCESS value.
   NOTE(review): the failure returns are inconsistent -- FAIL for a
   missing mandatory prefix, literal 1 otherwise; callers appear to
   treat any non-SUCCESS value as failure.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Run the generic expression parser over *STR by temporarily
     redirecting input_line_pointer; in_my_get_expression routes parse
     errors through md_operand (see below).  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = TRUE;
  expression (ep);
  in_my_get_expression = FALSE;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return SUCCESS;
}
1154
1155 /* Turn a string in input_line_pointer into a floating point constant
1156 of type TYPE, and store the appropriate bytes in *LITP. The number
1157 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1158 returned, or NULL on OK.
1159
1160 Note that fp constants aren't represent in the normal way on the ARM.
1161 In big endian mode, things are as expected. However, in little endian
1162 mode fp constants are big-endian word-wise, and little-endian byte-wise
1163 within the words. For example, (double) 1.1 in big endian mode is
1164 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1165 the byte sequence 99 99 f1 3f 9a 99 99 99.
1166
1167 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1168
const char *
md_atof (int type, char * litP, int * sizeP)
{
  /* PREC is the result size in 16-bit littlenums.  */
  int prec;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  switch (type)
    {
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;		/* 32-bit single precision.  */
      break;

    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;		/* 64-bit double precision.  */
      break;

    case 'x':
    case 'X':
      prec = 5;		/* Extended precision (see ??? in header comment).  */
      break;

    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  if (target_big_endian)
    {
      /* Big endian: emit the littlenums most-significant first.  */
      for (i = 0; i < prec; i++)
	{
	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	  litP += sizeof (LITTLENUM_TYPE);
	}
    }
  else
    {
      /* Pure-endian FPUs store the words least-significant first.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	for (i = prec - 1; i >= 0; i--)
	  {
	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += sizeof (LITTLENUM_TYPE);
	  }
      else
	/* For a 4 byte float the order of elements in `words' is 1 0.
	   For an 8 byte float the order is 1 0 3 2.  */
	for (i = 0; i < prec; i += 2)
	  {
	    md_number_to_chars (litP, (valueT) words[i + 1],
				sizeof (LITTLENUM_TYPE));
	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
				(valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += 2 * sizeof (LITTLENUM_TYPE);
	  }
    }

  return NULL;
}
1244
1245 /* We handle all bad expressions here, so that we can report the faulty
1246 instruction in the error message. */
1247
1248 void
1249 md_operand (expressionS * exp)
1250 {
1251 if (in_my_get_expression)
1252 exp->X_op = O_illegal;
1253 }
1254
1255 /* Immediate values. */
1256
1257 #ifdef OBJ_ELF
1258 /* Generic immediate-value read function for use in directives.
1259 Accepts anything that 'expression' can fold to a constant.
1260 *val receives the number. */
1261
1262 static int
1263 immediate_for_directive (int *val)
1264 {
1265 expressionS exp;
1266 exp.X_op = O_illegal;
1267
1268 if (is_immediate_prefix (*input_line_pointer))
1269 {
1270 input_line_pointer++;
1271 expression (&exp);
1272 }
1273
1274 if (exp.X_op != O_constant)
1275 {
1276 as_bad (_("expected #constant"));
1277 ignore_rest_of_line ();
1278 return FAIL;
1279 }
1280 *val = exp.X_add_number;
1281 return SUCCESS;
1282 }
1283 #endif
1284
1285 /* Register parsing. */
1286
1287 /* Generic register parser. CCP points to what should be the
1288 beginning of a register name. If it is indeed a valid register
1289 name, advance CCP over it and return the reg_entry structure;
1290 otherwise return NULL. Does not issue diagnostics. */
1291
1292 static struct reg_entry *
1293 arm_reg_parse_multi (char **ccp)
1294 {
1295 char *start = *ccp;
1296 char *p;
1297 struct reg_entry *reg;
1298
1299 skip_whitespace (start);
1300
1301 #ifdef REGISTER_PREFIX
1302 if (*start != REGISTER_PREFIX)
1303 return NULL;
1304 start++;
1305 #endif
1306 #ifdef OPTIONAL_REGISTER_PREFIX
1307 if (*start == OPTIONAL_REGISTER_PREFIX)
1308 start++;
1309 #endif
1310
1311 p = start;
1312 if (!ISALPHA (*p) || !is_name_beginner (*p))
1313 return NULL;
1314
1315 do
1316 p++;
1317 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1318
1319 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1320
1321 if (!reg)
1322 return NULL;
1323
1324 *ccp = p;
1325 return reg;
1326 }
1327
/* Accept the alternative spellings permitted for some register classes:
   generic coprocessor register names for Maverick registers, a bare
   number for coprocessors, and WCG names where WC is wanted.  Returns
   the register number or FAIL.  REG may be NULL (no name matched); only
   the bare-number case can then succeed.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1366
1367 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1368 return value is the register number or FAIL. */
1369
1370 static int
1371 arm_reg_parse (char **ccp, enum arm_reg_type type)
1372 {
1373 char *start = *ccp;
1374 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1375 int ret;
1376
1377 /* Do not allow a scalar (reg+index) to parse as a register. */
1378 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1379 return FAIL;
1380
1381 if (reg && reg->type == type)
1382 return reg->number;
1383
1384 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1385 return ret;
1386
1387 *ccp = start;
1388 return FAIL;
1389 }
1390
1391 /* Parse a Neon type specifier. *STR should point at the leading '.'
1392 character. Does no verification at this stage that the type fits the opcode
1393 properly. E.g.,
1394
1395 .i32.i32.s16
1396 .s32.f32
1397 .u16
1398
1399 Can all be legally parsed by this function.
1400
1401 Fills in neon_type struct pointer with parsed information, and updates STR
1402 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1403 type, FAIL if not. */
1404
1405 static int
1406 parse_neon_type (struct neon_type *type, char **str)
1407 {
1408 char *ptr = *str;
1409
1410 if (type)
1411 type->elems = 0;
1412
1413 while (type->elems < NEON_MAX_TYPE_ELS)
1414 {
1415 enum neon_el_type thistype = NT_untyped;
1416 unsigned thissize = -1u;
1417
1418 if (*ptr != '.')
1419 break;
1420
1421 ptr++;
1422
1423 /* Just a size without an explicit type. */
1424 if (ISDIGIT (*ptr))
1425 goto parsesize;
1426
1427 switch (TOLOWER (*ptr))
1428 {
1429 case 'i': thistype = NT_integer; break;
1430 case 'f': thistype = NT_float; break;
1431 case 'p': thistype = NT_poly; break;
1432 case 's': thistype = NT_signed; break;
1433 case 'u': thistype = NT_unsigned; break;
1434 case 'd':
1435 thistype = NT_float;
1436 thissize = 64;
1437 ptr++;
1438 goto done;
1439 default:
1440 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1441 return FAIL;
1442 }
1443
1444 ptr++;
1445
1446 /* .f is an abbreviation for .f32. */
1447 if (thistype == NT_float && !ISDIGIT (*ptr))
1448 thissize = 32;
1449 else
1450 {
1451 parsesize:
1452 thissize = strtoul (ptr, &ptr, 10);
1453
1454 if (thissize != 8 && thissize != 16 && thissize != 32
1455 && thissize != 64)
1456 {
1457 as_bad (_("bad size %d in type specifier"), thissize);
1458 return FAIL;
1459 }
1460 }
1461
1462 done:
1463 if (type)
1464 {
1465 type->el[type->elems].type = thistype;
1466 type->el[type->elems].size = thissize;
1467 type->elems++;
1468 }
1469 }
1470
1471 /* Empty/missing type is not a successful parse. */
1472 if (type->elems == 0)
1473 return FAIL;
1474
1475 *str = ptr;
1476
1477 return SUCCESS;
1478 }
1479
1480 /* Errors may be set multiple times during parsing or bit encoding
1481 (particularly in the Neon bits), but usually the earliest error which is set
1482 will be the most meaningful. Avoid overwriting it with later (cascading)
1483 errors by calling this function. */
1484
1485 static void
1486 first_error (const char *err)
1487 {
1488 if (!inst.error)
1489 inst.error = err;
1490 }
1491
1492 /* Parse a single type, e.g. ".s32", leading period included. */
1493 static int
1494 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1495 {
1496 char *str = *ccp;
1497 struct neon_type optype;
1498
1499 if (*str == '.')
1500 {
1501 if (parse_neon_type (&optype, &str) == SUCCESS)
1502 {
1503 if (optype.elems == 1)
1504 *vectype = optype.el[0];
1505 else
1506 {
1507 first_error (_("only one type should be specified for operand"));
1508 return FAIL;
1509 }
1510 }
1511 else
1512 {
1513 first_error (_("vector type expected"));
1514 return FAIL;
1515 }
1516 }
1517 else
1518 return FAIL;
1519
1520 *ccp = str;
1521
1522 return SUCCESS;
1523 }
1524
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES		15	/* "[]" -- broadcast to all lanes.  */
#define NEON_INTERLEAVE_LANES	14

/* Record a use of the given feature.  */
static void
record_feature_use (const arm_feature_set *feature)
{
  /* Used-feature sets are tracked separately for Thumb and ARM code;
     the consumers of these sets are outside this chunk.  */
  if (thumb_mode)
    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
  else
    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
}
1540
1541 /* If the given feature available in the selected CPU, mark it as used.
1542 Returns TRUE iff feature is available. */
static bfd_boolean
mark_feature_used (const arm_feature_set *feature)
{

  /* Do not support the use of MVE only instructions when in auto-detection or
     -march=all.  */
  if (((feature == &mve_ext) || (feature == &mve_fp_ext))
      && ARM_CPU_IS_ANY (cpu_variant))
    {
      first_error (BAD_MVE_AUTO);
      return FALSE;
    }
  /* Ensure the option is valid on the current architecture.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
    return FALSE;

  /* Add the appropriate architecture feature for the barrier option used.
     NOTE(review): "barrier option" predates wider reuse of this helper;
     it records whatever FEATURE was queried.  */
  record_feature_use (feature);

  return TRUE;
}
1565
1566 /* Parse either a register or a scalar, with an optional type. Return the
1567 register number, and optionally fill in the actual type of the register
1568 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1569 type/index information in *TYPEINFO. */
1570
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with "nothing known" type/index information.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_NSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  /* REG_TYPE_MQ appears to be the MVE view of a Neon Q register: only
     valid with the MVE extension, and spelled like an NQ register.  */
  if (type == REG_TYPE_MQ)
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	return FAIL;

      /* NOTE(review): REG is non-NULL here (checked above), so the
	 `!reg' test is redundant but harmless.  */
      if (!reg || reg->type != REG_TYPE_NQ)
	return FAIL;

      /* NOTE(review): the message says q0..q7 but the bound tested is
	 number > 14 -- confirm which is intended.  */
      if (reg->number > 14 && !mark_feature_used (&fpu_vfp_ext_d32))
	{
	  first_error (_("expected MVE register [q0..q7]"));
	  return FAIL;
	}
      type = REG_TYPE_NQ;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (type == REG_TYPE_NQ))
    /* With MVE enabled, a plain REG_TYPE_NQ request is refused --
       apparently Q registers must then be requested as REG_TYPE_MQ.  */
    return FAIL;


  if (type != reg->type)
    return FAIL;

  /* Pick up any type/index information attached to an alias.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix overrides nothing and may appear once.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional scalar index: "[n]", or "[]" meaning all lanes.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD
	  && !(type == REG_TYPE_VFS
	       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2)))
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1698
1699 /* Like arm_reg_parse, but also allow the following extra features:
1700 - If RTYPE is non-zero, return the (possibly restricted) type of the
1701 register (e.g. Neon double or quad reg when either has been requested).
1702 - If this is a Neon vector type with additional type information, fill
1703 in the struct pointed to by VECTYPE (if non-NULL).
1704 This function will fault on encountering a scalar. */
1705
1706 static int
1707 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1708 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1709 {
1710 struct neon_typed_alias atype;
1711 char *str = *ccp;
1712 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1713
1714 if (reg == FAIL)
1715 return FAIL;
1716
1717 /* Do not allow regname(... to parse as a register. */
1718 if (*str == '(')
1719 return FAIL;
1720
1721 /* Do not allow a scalar (reg+index) to parse as a register. */
1722 if ((atype.defined & NTA_HASINDEX) != 0)
1723 {
1724 first_error (_("register operand expected, but got scalar"));
1725 return FAIL;
1726 }
1727
1728 if (vectype)
1729 *vectype = atype.eltype;
1730
1731 *ccp = str;
1732
1733 return reg;
1734 }
1735
/* parse_scalar encodes a scalar as register * 16 + index; these unpack
   the two halves.  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1738
1739 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1740 have enough information to be able to do a good job bounds-checking. So, we
1741 just do easy checks here, and do further checks later. */
1742
1743 static int
1744 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1745 {
1746 int reg;
1747 char *str = *ccp;
1748 struct neon_typed_alias atype;
1749 enum arm_reg_type reg_type = REG_TYPE_VFD;
1750
1751 if (elsize == 4)
1752 reg_type = REG_TYPE_VFS;
1753
1754 reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);
1755
1756 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1757 return FAIL;
1758
1759 if (atype.index == NEON_ALL_LANES)
1760 {
1761 first_error (_("scalar must have an index"));
1762 return FAIL;
1763 }
1764 else if (atype.index >= 64 / elsize)
1765 {
1766 first_error (_("scalar index out of range"));
1767 return FAIL;
1768 }
1769
1770 if (type)
1771 *type = atype.eltype;
1772
1773 *ccp = str;
1774
1775 return reg * 16 + atype.index;
1776 }
1777
/* Types of registers in a list.  */

enum reg_list_els
{
  REGLIST_RN,		/* Core registers (LDM/STM-style lists).  */
  REGLIST_CLRM,		/* CLRM list: r0-r12, lr, and APSR.  */
  REGLIST_VFP_S,	/* VFP single-precision registers.  */
  REGLIST_VFP_S_VPR,	/* Single-precision list ending in VPR.  */
  REGLIST_VFP_D,	/* VFP/Neon double-precision registers.  */
  REGLIST_VFP_D_VPR,	/* Double-precision list ending in VPR.  */
  REGLIST_NEON_D	/* Neon D list; Q regs stand for D pairs.  */
};
1790
1791 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1792
1793 static long
1794 parse_reg_list (char ** strp, enum reg_list_els etype)
1795 {
1796 char *str = *strp;
1797 long range = 0;
1798 int another_range;
1799
1800 gas_assert (etype == REGLIST_RN || etype == REGLIST_CLRM);
1801
1802 /* We come back here if we get ranges concatenated by '+' or '|'. */
1803 do
1804 {
1805 skip_whitespace (str);
1806
1807 another_range = 0;
1808
1809 if (*str == '{')
1810 {
1811 int in_range = 0;
1812 int cur_reg = -1;
1813
1814 str++;
1815 do
1816 {
1817 int reg;
1818 const char apsr_str[] = "apsr";
1819 int apsr_str_len = strlen (apsr_str);
1820
1821 reg = arm_reg_parse (&str, REGLIST_RN);
1822 if (etype == REGLIST_CLRM)
1823 {
1824 if (reg == REG_SP || reg == REG_PC)
1825 reg = FAIL;
1826 else if (reg == FAIL
1827 && !strncasecmp (str, apsr_str, apsr_str_len)
1828 && !ISALPHA (*(str + apsr_str_len)))
1829 {
1830 reg = 15;
1831 str += apsr_str_len;
1832 }
1833
1834 if (reg == FAIL)
1835 {
1836 first_error (_("r0-r12, lr or APSR expected"));
1837 return FAIL;
1838 }
1839 }
1840 else /* etype == REGLIST_RN. */
1841 {
1842 if (reg == FAIL)
1843 {
1844 first_error (_(reg_expected_msgs[REGLIST_RN]));
1845 return FAIL;
1846 }
1847 }
1848
1849 if (in_range)
1850 {
1851 int i;
1852
1853 if (reg <= cur_reg)
1854 {
1855 first_error (_("bad range in register list"));
1856 return FAIL;
1857 }
1858
1859 for (i = cur_reg + 1; i < reg; i++)
1860 {
1861 if (range & (1 << i))
1862 as_tsktsk
1863 (_("Warning: duplicated register (r%d) in register list"),
1864 i);
1865 else
1866 range |= 1 << i;
1867 }
1868 in_range = 0;
1869 }
1870
1871 if (range & (1 << reg))
1872 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1873 reg);
1874 else if (reg <= cur_reg)
1875 as_tsktsk (_("Warning: register range not in ascending order"));
1876
1877 range |= 1 << reg;
1878 cur_reg = reg;
1879 }
1880 while (skip_past_comma (&str) != FAIL
1881 || (in_range = 1, *str++ == '-'));
1882 str--;
1883
1884 if (skip_past_char (&str, '}') == FAIL)
1885 {
1886 first_error (_("missing `}'"));
1887 return FAIL;
1888 }
1889 }
1890 else if (etype == REGLIST_RN)
1891 {
1892 expressionS exp;
1893
1894 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1895 return FAIL;
1896
1897 if (exp.X_op == O_constant)
1898 {
1899 if (exp.X_add_number
1900 != (exp.X_add_number & 0x0000ffff))
1901 {
1902 inst.error = _("invalid register mask");
1903 return FAIL;
1904 }
1905
1906 if ((range & exp.X_add_number) != 0)
1907 {
1908 int regno = range & exp.X_add_number;
1909
1910 regno &= -regno;
1911 regno = (1 << regno) - 1;
1912 as_tsktsk
1913 (_("Warning: duplicated register (r%d) in register list"),
1914 regno);
1915 }
1916
1917 range |= exp.X_add_number;
1918 }
1919 else
1920 {
1921 if (inst.relocs[0].type != 0)
1922 {
1923 inst.error = _("expression too complex");
1924 return FAIL;
1925 }
1926
1927 memcpy (&inst.relocs[0].exp, &exp, sizeof (expressionS));
1928 inst.relocs[0].type = BFD_RELOC_ARM_MULTI;
1929 inst.relocs[0].pc_rel = 0;
1930 }
1931 }
1932
1933 if (*str == '|' || *str == '+')
1934 {
1935 str++;
1936 another_range = 1;
1937 }
1938 }
1939 while (another_range);
1940
1941 *strp = str;
1942 return range;
1943 }
1944
1945 /* Parse a VFP register list. If the string is invalid return FAIL.
1946 Otherwise return the number of registers, and set PBASE to the first
1947 register. Parses registers of type ETYPE.
1948 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1949 - Q registers can be used to specify pairs of D registers
1950 - { } can be omitted from around a singleton register list
1951 FIXME: This is not implemented, as it would require backtracking in
1952 some cases, e.g.:
1953 vtbl.8 d3,d4,d5
1954 This could be done (the meaning isn't really ambiguous), but doesn't
1955 fit in well with the current parsing framework.
1956 - 32 D registers may be used (also true for VFPv3).
1957 FIXME: Types are ignored in these register lists, which is probably a
1958 bug. */
1959
1960 static int
1961 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype,
1962 bfd_boolean *partial_match)
1963 {
1964 char *str = *ccp;
1965 int base_reg;
1966 int new_base;
1967 enum arm_reg_type regtype = (enum arm_reg_type) 0;
1968 int max_regs = 0;
1969 int count = 0;
1970 int warned = 0;
1971 unsigned long mask = 0;
1972 int i;
1973 bfd_boolean vpr_seen = FALSE;
1974 bfd_boolean expect_vpr =
1975 (etype == REGLIST_VFP_S_VPR) || (etype == REGLIST_VFP_D_VPR);
1976
1977 if (skip_past_char (&str, '{') == FAIL)
1978 {
1979 inst.error = _("expecting {");
1980 return FAIL;
1981 }
1982
1983 switch (etype)
1984 {
1985 case REGLIST_VFP_S:
1986 case REGLIST_VFP_S_VPR:
1987 regtype = REG_TYPE_VFS;
1988 max_regs = 32;
1989 break;
1990
1991 case REGLIST_VFP_D:
1992 case REGLIST_VFP_D_VPR:
1993 regtype = REG_TYPE_VFD;
1994 break;
1995
1996 case REGLIST_NEON_D:
1997 regtype = REG_TYPE_NDQ;
1998 break;
1999
2000 default:
2001 gas_assert (0);
2002 }
2003
2004 if (etype != REGLIST_VFP_S && etype != REGLIST_VFP_S_VPR)
2005 {
2006 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
2007 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
2008 {
2009 max_regs = 32;
2010 if (thumb_mode)
2011 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
2012 fpu_vfp_ext_d32);
2013 else
2014 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
2015 fpu_vfp_ext_d32);
2016 }
2017 else
2018 max_regs = 16;
2019 }
2020
2021 base_reg = max_regs;
2022 *partial_match = FALSE;
2023
2024 do
2025 {
2026 int setmask = 1, addregs = 1;
2027 const char vpr_str[] = "vpr";
2028 int vpr_str_len = strlen (vpr_str);
2029
2030 new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
2031
2032 if (expect_vpr)
2033 {
2034 if (new_base == FAIL
2035 && !strncasecmp (str, vpr_str, vpr_str_len)
2036 && !ISALPHA (*(str + vpr_str_len))
2037 && !vpr_seen)
2038 {
2039 vpr_seen = TRUE;
2040 str += vpr_str_len;
2041 if (count == 0)
2042 base_reg = 0; /* Canonicalize VPR only on d0 with 0 regs. */
2043 }
2044 else if (vpr_seen)
2045 {
2046 first_error (_("VPR expected last"));
2047 return FAIL;
2048 }
2049 else if (new_base == FAIL)
2050 {
2051 if (regtype == REG_TYPE_VFS)
2052 first_error (_("VFP single precision register or VPR "
2053 "expected"));
2054 else /* regtype == REG_TYPE_VFD. */
2055 first_error (_("VFP/Neon double precision register or VPR "
2056 "expected"));
2057 return FAIL;
2058 }
2059 }
2060 else if (new_base == FAIL)
2061 {
2062 first_error (_(reg_expected_msgs[regtype]));
2063 return FAIL;
2064 }
2065
2066 *partial_match = TRUE;
2067 if (vpr_seen)
2068 continue;
2069
2070 if (new_base >= max_regs)
2071 {
2072 first_error (_("register out of range in list"));
2073 return FAIL;
2074 }
2075
2076 /* Note: a value of 2 * n is returned for the register Q<n>. */
2077 if (regtype == REG_TYPE_NQ)
2078 {
2079 setmask = 3;
2080 addregs = 2;
2081 }
2082
2083 if (new_base < base_reg)
2084 base_reg = new_base;
2085
2086 if (mask & (setmask << new_base))
2087 {
2088 first_error (_("invalid register list"));
2089 return FAIL;
2090 }
2091
2092 if ((mask >> new_base) != 0 && ! warned && !vpr_seen)
2093 {
2094 as_tsktsk (_("register list not in ascending order"));
2095 warned = 1;
2096 }
2097
2098 mask |= setmask << new_base;
2099 count += addregs;
2100
2101 if (*str == '-') /* We have the start of a range expression */
2102 {
2103 int high_range;
2104
2105 str++;
2106
2107 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
2108 == FAIL)
2109 {
2110 inst.error = gettext (reg_expected_msgs[regtype]);
2111 return FAIL;
2112 }
2113
2114 if (high_range >= max_regs)
2115 {
2116 first_error (_("register out of range in list"));
2117 return FAIL;
2118 }
2119
2120 if (regtype == REG_TYPE_NQ)
2121 high_range = high_range + 1;
2122
2123 if (high_range <= new_base)
2124 {
2125 inst.error = _("register range not in ascending order");
2126 return FAIL;
2127 }
2128
2129 for (new_base += addregs; new_base <= high_range; new_base += addregs)
2130 {
2131 if (mask & (setmask << new_base))
2132 {
2133 inst.error = _("invalid register list");
2134 return FAIL;
2135 }
2136
2137 mask |= setmask << new_base;
2138 count += addregs;
2139 }
2140 }
2141 }
2142 while (skip_past_comma (&str) != FAIL);
2143
2144 str++;
2145
2146 /* Sanity check -- should have raised a parse error above. */
2147 if ((!vpr_seen && count == 0) || count > max_regs)
2148 abort ();
2149
2150 *pbase = base_reg;
2151
2152 if (expect_vpr && !vpr_seen)
2153 {
2154 first_error (_("VPR expected last"));
2155 return FAIL;
2156 }
2157
2158 /* Final test -- the registers must be consecutive. */
2159 mask >>= base_reg;
2160 for (i = 0; i < count; i++)
2161 {
2162 if ((mask & (1u << i)) == 0)
2163 {
2164 inst.error = _("non-contiguous register range");
2165 return FAIL;
2166 }
2167 }
2168
2169 *ccp = str;
2170
2171 return count;
2172 }
2173
2174 /* True if two alias types are the same. */
2175
2176 static bfd_boolean
2177 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
2178 {
2179 if (!a && !b)
2180 return TRUE;
2181
2182 if (!a || !b)
2183 return FALSE;
2184
2185 if (a->defined != b->defined)
2186 return FALSE;
2187
2188 if ((a->defined & NTA_HASTYPE) != 0
2189 && (a->eltype.type != b->eltype.type
2190 || a->eltype.size != b->eltype.size))
2191 return FALSE;
2192
2193 if ((a->defined & NTA_HASINDEX) != 0
2194 && (a->index != b->index))
2195 return FALSE;
2196
2197 return TRUE;
2198 }
2199
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)

static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;	/* First register in the list, -1 until seen.  */
  int reg_incr = -1;	/* Register stride (1 or 2), -1 until determined.  */
  int count = 0;	/* Number of D registers accumulated so far.  */
  int lane = -1;	/* Lane index, or NEON_INTERLEAVE_LANES, -1 unset.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  /* The surrounding braces are optional for a single register.  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: fixes the base and the element type that all
	     later entries must agree with.  Q registers imply stride 1.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: its distance from the base sets the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Subsequent registers must continue the arithmetic sequence.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax.  Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count all D registers in the range; a Q register counts as two
	     (DREGS accounts for the upper bound's width).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* Scalar syntax Dn[x]: every entry must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  /* Can't mix indexed and non-indexed entries in one list.  */
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as documented above the macros.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2368
2369 /* Parse an explicit relocation suffix on an expression. This is
2370 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2371 arm_reloc_hsh contains no entries, so this function can only
2372 succeed if there is no () after the word. Returns -1 on error,
2373 BFD_RELOC_UNUSED if there wasn't any suffix. */
2374
2375 static int
2376 parse_reloc (char **str)
2377 {
2378 struct reloc_entry *r;
2379 char *p, *q;
2380
2381 if (**str != '(')
2382 return BFD_RELOC_UNUSED;
2383
2384 p = *str + 1;
2385 q = p;
2386
2387 while (*q && *q != ')' && *q != ',')
2388 q++;
2389 if (*q != ')')
2390 return -1;
2391
2392 if ((r = (struct reloc_entry *)
2393 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2394 return -1;
2395
2396 *str = q + 1;
2397 return r->reloc;
2398 }
2399
2400 /* Directives: register aliases. */
2401
2402 static struct reg_entry *
2403 insert_reg_alias (char *str, unsigned number, int type)
2404 {
2405 struct reg_entry *new_reg;
2406 const char *name;
2407
2408 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2409 {
2410 if (new_reg->builtin)
2411 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2412
2413 /* Only warn about a redefinition if it's not defined as the
2414 same register. */
2415 else if (new_reg->number != number || new_reg->type != type)
2416 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2417
2418 return NULL;
2419 }
2420
2421 name = xstrdup (str);
2422 new_reg = XNEW (struct reg_entry);
2423
2424 new_reg->name = name;
2425 new_reg->number = number;
2426 new_reg->type = type;
2427 new_reg->builtin = FALSE;
2428 new_reg->neon = NULL;
2429
2430 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2431 abort ();
2432
2433 return new_reg;
2434 }
2435
2436 static void
2437 insert_neon_reg_alias (char *str, int number, int type,
2438 struct neon_typed_alias *atype)
2439 {
2440 struct reg_entry *reg = insert_reg_alias (str, number, type);
2441
2442 if (!reg)
2443 {
2444 first_error (_("attempt to redefine typed alias"));
2445 return;
2446 }
2447
2448 if (atype)
2449 {
2450 reg->neon = XNEW (struct neon_typed_alias);
2451 *reg->neon = *atype;
2452 }
2453 }
2454
2455 /* Look for the .req directive. This is of the form:
2456
2457 new_register_name .req existing_register_name
2458
2459 If we find one, or if it looks sufficiently like one that we want to
2460 handle any error here, return TRUE. Otherwise return FALSE. */
2461
2462 static bfd_boolean
2463 create_register_alias (char * newname, char *p)
2464 {
2465 struct reg_entry *old;
2466 char *oldname, *nbuf;
2467 size_t nlen;
2468
2469 /* The input scrubber ensures that whitespace after the mnemonic is
2470 collapsed to single spaces. */
2471 oldname = p;
2472 if (strncmp (oldname, " .req ", 6) != 0)
2473 return FALSE;
2474
2475 oldname += 6;
2476 if (*oldname == '\0')
2477 return FALSE;
2478
2479 old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2480 if (!old)
2481 {
2482 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2483 return TRUE;
2484 }
2485
2486 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2487 the desired alias name, and p points to its end. If not, then
2488 the desired alias name is in the global original_case_string. */
2489 #ifdef TC_CASE_SENSITIVE
2490 nlen = p - newname;
2491 #else
2492 newname = original_case_string;
2493 nlen = strlen (newname);
2494 #endif
2495
2496 nbuf = xmemdup0 (newname, nlen);
2497
2498 /* Create aliases under the new name as stated; an all-lowercase
2499 version of the new name; and an all-uppercase version of the new
2500 name. */
2501 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2502 {
2503 for (p = nbuf; *p; p++)
2504 *p = TOUPPER (*p);
2505
2506 if (strncmp (nbuf, newname, nlen))
2507 {
2508 /* If this attempt to create an additional alias fails, do not bother
2509 trying to create the all-lower case alias. We will fail and issue
2510 a second, duplicate error message. This situation arises when the
2511 programmer does something like:
2512 foo .req r0
2513 Foo .req r1
2514 The second .req creates the "Foo" alias but then fails to create
2515 the artificial FOO alias because it has already been created by the
2516 first .req. */
2517 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2518 {
2519 free (nbuf);
2520 return TRUE;
2521 }
2522 }
2523
2524 for (p = nbuf; *p; p++)
2525 *p = TOLOWER (*p);
2526
2527 if (strncmp (nbuf, newname, nlen))
2528 insert_reg_alias (nbuf, old->number, old->type);
2529 }
2530
2531 free (nbuf);
2532 return TRUE;
2533 }
2534
/* Create a Neon typed/indexed register alias using directives, e.g.:
     X .dn d5.s32[1]
     Y .qn 6.s16
     Z .dn d7
     T .dn Z[0]
   These typed registers can be used instead of the types specified after the
   Neon mnemonic, so long as all operands given have types.  Types can also be
   specified directly, e.g.:
     vadd d0.s32, d1.s32, d2.s32  */

static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;	/* Used when the base is a bare number.  */
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  /* Start with no type and no index defined.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* The directive determines the register class of the base.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      basereg = &mybasereg;
      /* Q registers are numbered in D-register units, hence the * 2.  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit any type/index info already attached to the base alias.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2683
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  */

static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* Reaching here means ".req" appeared in directive position.  */
  as_bad (_("invalid syntax for .req directive"));
}
2692
/* Handler for a misplaced .dn directive; like .req, .dn must follow
   the alias name rather than start the line.  */
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2698
/* Handler for a misplaced .qn directive; like .req, .qn must follow
   the alias name rather than start the line.  */
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2704
2705 /* The .unreq directive deletes an alias which was previously defined
2706 by .req. For example:
2707
2708 my_alias .req r11
2709 .unreq my_alias */
2710
2711 static void
2712 s_unreq (int a ATTRIBUTE_UNUSED)
2713 {
2714 char * name;
2715 char saved_char;
2716
2717 name = input_line_pointer;
2718
2719 while (*input_line_pointer != 0
2720 && *input_line_pointer != ' '
2721 && *input_line_pointer != '\n')
2722 ++input_line_pointer;
2723
2724 saved_char = *input_line_pointer;
2725 *input_line_pointer = 0;
2726
2727 if (!*name)
2728 as_bad (_("invalid syntax for .unreq directive"));
2729 else
2730 {
2731 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2732 name);
2733
2734 if (!reg)
2735 as_bad (_("unknown register alias '%s'"), name);
2736 else if (reg->builtin)
2737 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2738 name);
2739 else
2740 {
2741 char * p;
2742 char * nbuf;
2743
2744 hash_delete (arm_reg_hsh, name, FALSE);
2745 free ((char *) reg->name);
2746 if (reg->neon)
2747 free (reg->neon);
2748 free (reg);
2749
2750 /* Also locate the all upper case and all lower case versions.
2751 Do not complain if we cannot find one or the other as it
2752 was probably deleted above. */
2753
2754 nbuf = strdup (name);
2755 for (p = nbuf; *p; p++)
2756 *p = TOUPPER (*p);
2757 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2758 if (reg)
2759 {
2760 hash_delete (arm_reg_hsh, nbuf, FALSE);
2761 free ((char *) reg->name);
2762 if (reg->neon)
2763 free (reg->neon);
2764 free (reg);
2765 }
2766
2767 for (p = nbuf; *p; p++)
2768 *p = TOLOWER (*p);
2769 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2770 if (reg)
2771 {
2772 hash_delete (arm_reg_hsh, nbuf, FALSE);
2773 free ((char *) reg->name);
2774 if (reg->neon)
2775 free (reg->neon);
2776 free (reg);
2777 }
2778
2779 free (nbuf);
2780 }
2781 }
2782
2783 *input_line_pointer = saved_char;
2784 demand_empty_rest_of_line ();
2785 }
2786
2787 /* Directives: Instruction set selection. */
2788
2789 #ifdef OBJ_ELF
2790 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2791 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2792 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2793 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2794
/* Create a new mapping symbol for the transition to STATE, at offset
   VALUE within FRAG.  Mapping symbols ($a, $t, $d) mark the boundaries
   between ARM code, Thumb code, and data for the benefit of
   disassemblers and linkers (see ARM AAELF, "Mapping symbols").  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Code mapping symbols also carry the ARM/Thumb annotations used by
     the interworking support; data symbols need nothing extra.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Two symbols at the same offset: the newer one supersedes.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2870
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   STATE is the code state to resume after BYTES bytes of padding
   starting at offset VALUE within FRAG.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The symbol being removed was also the first in the frag.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding as data, then resume the code state after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2897
2898 static void mapping_state_2 (enum mstate state, int max_chars);
2899
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /*  PR gas/12931
	All ARM instructions require 4-byte alignment.
	(Almost) all Thumb instructions require 2-byte alignment.

	When emitting instructions into any section, mark the section
	appropriately.

	Some Thumb instructions are alignment-sensitive modulo 4 bytes,
	but themselves require 2-byte alignment; this applies to some
	PC- relative forms.  However, these cases will involve implicit
	literal pool generation or an explicit .align >=2, both of
	which will cause the section to me marked with sufficient
	alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  /* Delegate symbol emission; no bytes have been reserved yet.  */
  mapping_state_2 (state, 0);
}
2936
/* Same as mapping_state, but MAX_CHARS bytes have already been
   allocated.  Put the mapping symbol that far back.  */

static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only ordinary sections get mapping symbols.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      /* Anything emitted before this first code symbol must have been
	 data, so mark the start of the section accordingly.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2966 #undef TRANSITION
2967 #else
2968 #define mapping_state(x) ((void)0)
2969 #define mapping_state_2(x, y) ((void)0)
2970 #endif
2971
2972 /* Find the real, Thumb encoded start of a Thumb function. */
2973
2974 #ifdef OBJ_COFF
/* Given SYMBOLP, the symbol for a Thumb function, return the symbol
   for its Thumb-encoded entry point (the ".real_start_of" stub label
   emitted by the compiler), falling back to SYMBOLP itself when no
   such label exists.  */
static symbolS *
find_real_start (symbolS * symbolP)
{
  char * real_start;
  const char * name = S_GET_NAME (symbolP);
  symbolS * new_target;

  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
#define STUB_NAME ".real_start_of"

  if (name == NULL)
    abort ();

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location. These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
    return symbolP;

  real_start = concat (STUB_NAME, name, NULL);
  new_target = symbol_find (real_start);
  free (real_start);

  if (new_target == NULL)
    {
      as_warn (_("Failed to find real start of function: %s\n"), name);
      new_target = symbolP;
    }

  return new_target;
}
3008 #endif
3009
3010 static void
3011 opcode_select (int width)
3012 {
3013 switch (width)
3014 {
3015 case 16:
3016 if (! thumb_mode)
3017 {
3018 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
3019 as_bad (_("selected processor does not support THUMB opcodes"));
3020
3021 thumb_mode = 1;
3022 /* No need to force the alignment, since we will have been
3023 coming from ARM mode, which is word-aligned. */
3024 record_alignment (now_seg, 1);
3025 }
3026 break;
3027
3028 case 32:
3029 if (thumb_mode)
3030 {
3031 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
3032 as_bad (_("selected processor does not support ARM opcodes"));
3033
3034 thumb_mode = 0;
3035
3036 if (!need_pass_2)
3037 frag_align (2, 0, 0);
3038
3039 record_alignment (now_seg, 1);
3040 }
3041 break;
3042
3043 default:
3044 as_bad (_("invalid instruction size selected (%d)"), width);
3045 }
3046 }
3047
/* Handler for the .arm directive: switch to 32-bit ARM encoding.  */
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
3054
/* Handler for the .thumb directive: switch to 16-bit Thumb encoding.  */
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
3061
3062 static void
3063 s_code (int unused ATTRIBUTE_UNUSED)
3064 {
3065 int temp;
3066
3067 temp = get_absolute_expression ();
3068 switch (temp)
3069 {
3070 case 16:
3071 case 32:
3072 opcode_select (temp);
3073 break;
3074
3075 default:
3076 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
3077 }
3078 }
3079
/* Handler for the .force_thumb directive.  */
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* 2 (rather than 1) marks "forced" Thumb mode.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
3096
/* Handler for the .thumb_func directive: switch to Thumb encoding and
   flag the next label as a Thumb function entry point.  */
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
3106
/* Perform a .set directive, but also mark the alias as
   being a thumb function.

   EQUIV is non-zero for .thumb_equiv-style semantics, in which case
   redefinition of an already-defined symbol is an error.  */

static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Temporarily NUL-terminate the name for the error message.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Restore the delimiter the name scan overwrote.  */
  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  /* Parse the value expression and bind it to the symbol.  */
  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
3195
3196 /* Directives: Mode selection. */
3197
3198 /* .syntax [unified|divided] - choose the new unified syntax
3199 (same for Arm and Thumb encoding, modulo slight differences in what
3200 can be represented) or the old divergent syntax for each mode. */
3201 static void
3202 s_syntax (int unused ATTRIBUTE_UNUSED)
3203 {
3204 char *name, delim;
3205
3206 delim = get_symbol_name (& name);
3207
3208 if (!strcasecmp (name, "unified"))
3209 unified_syntax = TRUE;
3210 else if (!strcasecmp (name, "divided"))
3211 unified_syntax = FALSE;
3212 else
3213 {
3214 as_bad (_("unrecognized syntax mode \"%s\""), name);
3215 return;
3216 }
3217 (void) restore_line_pointer (delim);
3218 demand_empty_rest_of_line ();
3219 }
3220
3221 /* Directives: sectioning and alignment. */
3222
/* Handler for the .bss directive.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3235
/* Handler for the .even directive: align to a two-byte boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3247
3248 /* Directives: CodeComposer Studio. */
3249
3250 /* .ref (for CodeComposer Studio syntax only). */
/* .ref (for CodeComposer Studio syntax only).  Accepted and ignored
   under -mccs; an error otherwise.  */
static void
s_ccs_ref (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    ignore_rest_of_line ();
  else
    as_bad (_(".ref pseudo-op only available with -mccs flag."));
}
3259
3260 /* If name is not NULL, then it is used for marking the beginning of a
3261 function, whereas if it is NULL then it means the function end. */
3262 static void
3263 asmfunc_debug (const char * name)
3264 {
3265 static const char * last_name = NULL;
3266
3267 if (name != NULL)
3268 {
3269 gas_assert (last_name == NULL);
3270 last_name = name;
3271
3272 if (debug_type == DEBUG_STABS)
3273 stabs_generate_asm_func (name, name);
3274 }
3275 else
3276 {
3277 gas_assert (last_name != NULL);
3278
3279 if (debug_type == DEBUG_STABS)
3280 stabs_generate_asm_endfunc (last_name, last_name);
3281
3282 last_name = NULL;
3283 }
3284 }
3285
3286 static void
3287 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3288 {
3289 if (codecomposer_syntax)
3290 {
3291 switch (asmfunc_state)
3292 {
3293 case OUTSIDE_ASMFUNC:
3294 asmfunc_state = WAITING_ASMFUNC_NAME;
3295 break;
3296
3297 case WAITING_ASMFUNC_NAME:
3298 as_bad (_(".asmfunc repeated."));
3299 break;
3300
3301 case WAITING_ENDASMFUNC:
3302 as_bad (_(".asmfunc without function."));
3303 break;
3304 }
3305 demand_empty_rest_of_line ();
3306 }
3307 else
3308 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3309 }
3310
3311 static void
3312 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3313 {
3314 if (codecomposer_syntax)
3315 {
3316 switch (asmfunc_state)
3317 {
3318 case OUTSIDE_ASMFUNC:
3319 as_bad (_(".endasmfunc without a .asmfunc."));
3320 break;
3321
3322 case WAITING_ASMFUNC_NAME:
3323 as_bad (_(".endasmfunc without function."));
3324 break;
3325
3326 case WAITING_ENDASMFUNC:
3327 asmfunc_state = OUTSIDE_ASMFUNC;
3328 asmfunc_debug (NULL);
3329 break;
3330 }
3331 demand_empty_rest_of_line ();
3332 }
3333 else
3334 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3335 }
3336
3337 static void
3338 s_ccs_def (int name)
3339 {
3340 if (codecomposer_syntax)
3341 s_globl (name);
3342 else
3343 as_bad (_(".def pseudo-op only available with -mccs flag."));
3344 }
3345
3346 /* Directives: Literal pools. */
3347
3348 static literal_pool *
3349 find_literal_pool (void)
3350 {
3351 literal_pool * pool;
3352
3353 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3354 {
3355 if (pool->section == now_seg
3356 && pool->sub_section == now_subseg)
3357 break;
3358 }
3359
3360 return pool;
3361 }
3362
3363 static literal_pool *
3364 find_or_make_literal_pool (void)
3365 {
3366 /* Next literal pool ID number. */
3367 static unsigned int latest_pool_num = 1;
3368 literal_pool * pool;
3369
3370 pool = find_literal_pool ();
3371
3372 if (pool == NULL)
3373 {
3374 /* Create a new pool. */
3375 pool = XNEW (literal_pool);
3376 if (! pool)
3377 return NULL;
3378
3379 pool->next_free_entry = 0;
3380 pool->section = now_seg;
3381 pool->sub_section = now_subseg;
3382 pool->next = list_of_pools;
3383 pool->symbol = NULL;
3384 pool->alignment = 2;
3385
3386 /* Add it to the list. */
3387 list_of_pools = pool;
3388 }
3389
3390 /* New pools, and emptied pools, will have a NULL symbol. */
3391 if (pool->symbol == NULL)
3392 {
3393 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3394 (valueT) 0, &zero_address_frag);
3395 pool->id = latest_pool_num ++;
3396 }
3397
3398 /* Done. */
3399 return pool;
3400 }
3401
/* Add the literal in the global 'inst'
   structure to the relevant literal pool.  NBYTES is 4 for a word
   literal and 8 for a doubleword literal.  Identical values already in
   the pool are reused rather than duplicated.  On success, rewrites
   inst.relocs[0].exp to be a symbolic reference to the pool entry
   (pool anchor symbol + byte offset) and returns SUCCESS; on error
   sets inst.error and returns FAIL.  */

static int
add_to_lit_pool (unsigned int nbytes)
{
/* X_md encoding: low byte is the entry size, high bits flag a
   4-byte padding slot inserted to 8-align a doubleword entry.  */
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  if (nbytes == 8)
    {
      /* Split the 64-bit value into two 32-bit halves, ordered to
	 match the target endianness.  */
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	       : inst.relocs[0].exp.X_unsigned ? 0
	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Match an existing constant entry of the same size and
	     signedness...  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.relocs[0].exp.X_unsigned))
	    break;

	  /* ... or an existing symbolic entry for the same symbol
	     expression.  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.relocs[0].exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.relocs[0].exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* A doubleword is stored as two consecutive word entries; it can
	 only be reused when 8-aligned within the pool.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.relocs[0].exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.relocs[0].exp.X_unsigned))
	break;

      /* A word literal can be dropped into a padding slot left behind
	 by an earlier doubleword alignment.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.relocs[0].exp.X_op == O_constant
		|| inst.relocs[0].exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      /* Insert a zero-valued padding word so the doubleword
		 starts on an 8-byte boundary.  */
	      pool->literals[entry] = inst.relocs[0].exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the doubleword as two 4-byte constant entries.  */
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Reuse a padding slot for this word literal.  */
      pool->literals[entry] = inst.relocs[0].exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Redirect the instruction's operand at the pool entry: anchor
     symbol plus the byte offset accumulated above.  */
  inst.relocs[0].exp.X_op	     = O_symbol;
  inst.relocs[0].exp.X_add_number = pool_size;
  inst.relocs[0].exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3566
/* Hook run when a label without a trailing colon starts a line.  In
   CodeComposer mode, while a function name is awaited after .asmfunc,
   the label is taken as that function's name.  Returns TRUE if the
   label is acceptable, FALSE if it was rejected.  */

bfd_boolean
tc_start_label_without_colon (void)
{
  bfd_boolean ret = TRUE;

  if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
    {
      const char *label = input_line_pointer;

      /* input_line_pointer is just past the label; scan backwards to
	 its first character.  */
      while (!is_end_of_line[(int) label[-1]])
	--label;

      if (*label == '.')
	{
	  as_bad (_("Invalid label '%s'"), label);
	  ret = FALSE;
	}

      /* Record the name so .endasmfunc can emit the matching end
	 record.  */
      asmfunc_debug (label);

      asmfunc_state = WAITING_ENDASMFUNC;
    }

  return ret;
}
3592
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.
   Fill in the pre-allocated SYMBOLP with NAME (copied), SEGMENT, VALU
   and FRAG, then append it to the global symbol chain and run the
   object-format and target new-symbol hooks.  */

static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	    valu,	/* Symbol value.  */
	       fragS *	    frag)	/* Associated fragment.  */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy the name onto the notes obstack so it outlives the caller's
     buffer.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Late additions are not allowed once the table is frozen.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
3643
/* Implement the ".ltorg" directive: emit the current section's literal
   pool at this point in the output and mark the pool empty so a new
   one can accumulate.  A pool with no entries (or no anchor symbol)
   is silently ignored.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* Literal data needs a $d mapping symbol.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* NOTE: the embedded \002 byte keeps the generated name distinct
     from anything a user can write.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Pin the pool's anchor symbol to the current location.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3695
3696 #ifdef OBJ_ELF
3697 /* Forward declarations for functions below, in the MD interface
3698 section. */
3699 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3700 static valueT create_unwind_entry (int);
3701 static void start_unwind_section (const segT, int);
3702 static void add_unwind_opcode (valueT, int);
3703 static void flush_pending_unwind (void);
3704
3705 /* Directives: Data. */
3706
/* Cons-style data directive handler for ARM ELF.  Parses a
   comma-separated list of expressions and emits each as NBYTES of
   data; a symbolic expression may carry a relocation-specifier suffix
   (parsed by parse_reloc) that selects a specific relocation type for
   that value.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* This is data, not instructions: update the mapping state.  */
  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No suffix: plain data emission.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Splice the relocation marker out of the input line
		     buffer, re-run the expression parser over the
		     joined text, then restore the original bytes.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Place the (possibly smaller) relocation at the end
		     of the emitted field.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3801
/* Emit an expression containing a 32-bit thumb instruction.
   Implementation based on put_thumb32_insn: a Thumb-2 32-bit opcode is
   stored as two 16-bit units, most significant halfword first, so the
   constant is split and emitted in two halves.  */

static void
emit_thumb32_expr (expressionS * exp)
{
  expressionS exp_high = *exp;

  /* High halfword first...  */
  exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
  emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
  /* ...then the low halfword.  */
  exp->X_add_number &= 0xffff;
  emit_expr (exp, (unsigned int) THUMB_SIZE);
}
3815
/* Guess the byte size of a Thumb instruction from its opcode value:
   2 for a value in the 16-bit encoding space (< 0xe800), 4 for a value
   in the 32-bit encoding space (>= 0xe8000000), and 0 when the value
   falls in neither range so the size cannot be determined.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int encoding = (unsigned int) opcode;

  if (encoding < 0xe800u)
    return 2;
  if (encoding >= 0xe8000000u)
    return 4;
  return 0;
}
3828
/* Emit the constant expression EXP as one hand-encoded instruction of
   NBYTES bytes (.inst family).  NBYTES of 0 means deduce the size from
   the opcode value via thumb_insn_size.  Returns TRUE iff something
   was emitted.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width: infer it from the encoding.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT/predication state machine consistent with
		 the hand-encoded instruction.  */
	      if (now_pred.state == AUTOMATIC_PRED_BLOCK)
		set_pred_insn_type_nonvoid (OUTSIDE_PRED_INSN, 0);
	      else
		set_pred_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* 32-bit Thumb opcodes on little-endian targets must be
		 emitted as two halfwords, high halfword first.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3873
/* Like s_arm_elf_cons but do not use md_cons_align and
   set the mapping state to MAP_ARM/MAP_THUMB.  NBYTES is 0 (deduce
   each instruction's size from its encoding), 2 or 4; explicit width
   suffixes are only meaningful in Thumb mode.  */

static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      /* ARM-state instructions are always 4 bytes.  */
      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3923
/* Parse a .rel31 directive.  Syntax: ".rel31 bit, expression".  Emits
   a 32-bit word carrying EXPRESSION as a self-relative
   BFD_RELOC_ARM_PREL31 value in bits 0-30, with bit 31 preset to the
   leading "0" or "1".  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  /* Emit the word with only the high bit set; the PREL31 fixup fills
     in the remaining 31 bits at write-out time.  */
  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3963
3964 /* Directives: AEABI stack-unwind tables. */
3965
3966 /* Parse an unwind_fnstart directive. Simply records the current location. */
3967
3968 static void
3969 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3970 {
3971 demand_empty_rest_of_line ();
3972 if (unwind.proc_start)
3973 {
3974 as_bad (_("duplicate .fnstart directive"));
3975 return;
3976 }
3977
3978 /* Mark the start of the function. */
3979 unwind.proc_start = expr_build_dot ();
3980
3981 /* Reset the rest of the unwind info. */
3982 unwind.opcode_count = 0;
3983 unwind.table_entry = NULL;
3984 unwind.personality_routine = NULL;
3985 unwind.personality_index = -1;
3986 unwind.frame_size = 0;
3987 unwind.fp_offset = 0;
3988 unwind.fp_reg = REG_SP;
3989 unwind.fp_used = 0;
3990 unwind.sp_restored = 0;
3991 }
3992
3993
3994 /* Parse a handlerdata directive. Creates the exception handling table entry
3995 for the function. */
3996
3997 static void
3998 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3999 {
4000 demand_empty_rest_of_line ();
4001 if (!unwind.proc_start)
4002 as_bad (MISSING_FNSTART);
4003
4004 if (unwind.table_entry)
4005 as_bad (_("duplicate .handlerdata directive"));
4006
4007 create_unwind_entry (1);
4008 }
4009
/* Parse an unwind_fnend directive.  Generates the index table entry:
   two words in the exception index section, the first pointing back at
   the function start, the second holding either an inline unwind entry
   or a pointer to the table entry built by .handlerdata.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.	 */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      /* A zero-sized BFD_RELOC_NONE fixup creates the undefined
	 reference without emitting any data.  */
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.	 */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
4079
4080
4081 /* Parse an unwind_cantunwind directive. */
4082
4083 static void
4084 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
4085 {
4086 demand_empty_rest_of_line ();
4087 if (!unwind.proc_start)
4088 as_bad (MISSING_FNSTART);
4089
4090 if (unwind.personality_routine || unwind.personality_index != -1)
4091 as_bad (_("personality routine specified for cantunwind frame"));
4092
4093 unwind.personality_index = -2;
4094 }
4095
4096
4097 /* Parse a personalityindex directive. */
4098
4099 static void
4100 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
4101 {
4102 expressionS exp;
4103
4104 if (!unwind.proc_start)
4105 as_bad (MISSING_FNSTART);
4106
4107 if (unwind.personality_routine || unwind.personality_index != -1)
4108 as_bad (_("duplicate .personalityindex directive"));
4109
4110 expression (&exp);
4111
4112 if (exp.X_op != O_constant
4113 || exp.X_add_number < 0 || exp.X_add_number > 15)
4114 {
4115 as_bad (_("bad personality routine number"));
4116 ignore_rest_of_line ();
4117 return;
4118 }
4119
4120 unwind.personality_index = exp.X_add_number;
4121
4122 demand_empty_rest_of_line ();
4123 }
4124
4125
/* Parse a personality directive.  Records the named symbol as the
   custom personality routine for the current frame.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  c = get_symbol_name (& name);
  p = input_line_pointer;
  /* get_symbol_name leaves the pointer on the closing quote of a
     quoted name; step past it.  */
  if (c == '"')
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  /* Restore the terminator character clobbered by get_symbol_name.  */
  *p = c;
  demand_empty_rest_of_line ();
}
4147
4148
/* Parse a directive saving core registers.  Consumes a register list
   from the input line and appends the unwind opcodes that pop that
   set, then accounts for the pushed bytes in the frame size.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;			/* Bitmask of saved registers r0-r15.  */
  int n;

  range = parse_reg_list (&input_line_pointer, REGLIST_RN);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.	 */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4224
4225
/* Parse a directive saving FPA registers.  REG is the first register
   of the saved block; the directive's argument gives the number of
   registers (1-4) transferred.  */

static void
s_arm_unwind_save_fpa (int reg)
{
  expressionS exp;
  int num_regs;
  valueT op;

  /* Get Number of registers to transfer.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    expression (&exp);
  else
    exp.X_op = O_illegal;

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected , <constant>"));
      ignore_rest_of_line ();
      return;
    }

  num_regs = exp.X_add_number;

  if (num_regs < 1 || num_regs > 4)
    {
      as_bad (_("number of registers must be in the range [1:4]"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 4)
    {
      /* Short form.  */
      op = 0xb4 | (num_regs - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xc800 | (reg << 4) | (num_regs - 1);
      add_unwind_opcode (op, 2);
    }
  /* FPA registers are saved as 12 bytes each.  */
  unwind.frame_size += num_regs * 12;
}
4273
4274
/* Parse a directive saving VFP registers for ARMv6 and above.  The
   parsed D-register list may span the VFPv2 (d0-d15) and VFPv3
   (d16-d31) banks; each bank gets its own unwind opcode.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;		/* First register of the list.  */
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;
  bfd_boolean partial_match;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D,
			      &partial_match);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* D registers are 8 bytes each.  */
  unwind.frame_size += count * 8;
}
4325
4326
/* Parse a directive saving VFP registers for pre-ARMv6.  Uses the
   FSTMX-style opcodes, so each D register costs 8 bytes plus 4 bytes
   of format word.  */

static void
s_arm_unwind_save_vfp (void)
{
  int count;
  unsigned int reg;		/* First register of the list.  */
  valueT op;
  bfd_boolean partial_match;

  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D,
			      &partial_match);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 8)
    {
      /* Short form.  */
      op = 0xb8 | (count - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xb300 | (reg << 4) | (count - 1);
      add_unwind_opcode (op, 2);
    }
  unwind.frame_size += count * 8 + 4;
}
4362
4363
/* Parse a directive saving iWMMXt data registers.  Parses a
   brace-enclosed list (with ranges) of wR registers into a 16-bit
   mask, tries to merge with the immediately preceding opcode(s), then
   emits save opcodes for each contiguous block, highest block first.  */

static void
s_arm_unwind_save_mmxwr (void)
{
  int reg;
  int hi_reg;
  int i;
  unsigned mask = 0;		/* Bit N set <=> wRN is saved.  */
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	  goto error;
	}

      /* Any bit at or above REG already set means the list went
	 backwards.  */
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.	*/
  flush_pending_unwind ();

  for (i = 0; i < 16; i++)
    {
      if (mask & (1 << i))
	unwind.frame_size += 8;
    }

  /* Attempt to combine with a previous opcode.	 We do this because gcc
     likes to output separate unwind directives for a single block of
     registers.	 */
  if (unwind.opcode_count > 0)
    {
      i = unwind.opcodes[unwind.opcode_count - 1];
      if ((i & 0xf8) == 0xc0)
	{
	  i &= 7;
	  /* Only merge if the blocks are contiguous.  */
	  if (i < 6)
	    {
	      /* Previous opcode was short-form (wR10..wR10+i); absorb
		 it when our list is exactly {wR9}.  */
	      if ((mask & 0xfe00) == (1 << 9))
		{
		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
		  unwind.opcode_count--;
		}
	    }
	  else if (i == 6 && unwind.opcode_count >= 2)
	    {
	      /* Previous opcode was long-form (two bytes); decode its
		 start register and count.  */
	      i = unwind.opcodes[unwind.opcode_count - 2];
	      reg = i >> 4;
	      i &= 0xf;

	      op = 0xffff << (reg - 1);
	      if (reg > 0
		  && ((mask & op) == (1u << (reg - 1))))
		{
		  op = (1 << (reg + i + 1)) - 1;
		  op &= ~((1 << reg) - 1);
		  mask |= op;
		  unwind.opcode_count -= 2;
		}
	    }
	}
    }

  hi_reg = 15;
  /* We want to generate opcodes in the order the registers have been
     saved, ie. descending order.  */
  for (reg = 15; reg >= -1; reg--)
    {
      /* Save registers in blocks.  */
      if (reg < 0
	  || !(mask & (1 << reg)))
	{
	  /* We found an unsaved reg.  Generate opcodes to save the
	     preceding block.	*/
	  if (reg != hi_reg)
	    {
	      if (reg == 9)
		{
		  /* Short form.  */
		  op = 0xc0 | (hi_reg - 10);
		  add_unwind_opcode (op, 1);
		}
	      else
		{
		  /* Long form.	 */
		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
		  add_unwind_opcode (op, 2);
		}
	    }
	  hi_reg = reg - 1;
	}
    }

  return;
error:
  ignore_rest_of_line ();
}
4497
4498 static void
4499 s_arm_unwind_save_mmxwcg (void)
4500 {
4501 int reg;
4502 int hi_reg;
4503 unsigned mask = 0;
4504 valueT op;
4505
4506 if (*input_line_pointer == '{')
4507 input_line_pointer++;
4508
4509 skip_whitespace (input_line_pointer);
4510
4511 do
4512 {
4513 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4514
4515 if (reg == FAIL)
4516 {
4517 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4518 goto error;
4519 }
4520
4521 reg -= 8;
4522 if (mask >> reg)
4523 as_tsktsk (_("register list not in ascending order"));
4524 mask |= 1 << reg;
4525
4526 if (*input_line_pointer == '-')
4527 {
4528 input_line_pointer++;
4529 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4530 if (hi_reg == FAIL)
4531 {
4532 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4533 goto error;
4534 }
4535 else if (reg >= hi_reg)
4536 {
4537 as_bad (_("bad register range"));
4538 goto error;
4539 }
4540 for (; reg < hi_reg; reg++)
4541 mask |= 1 << reg;
4542 }
4543 }
4544 while (skip_past_comma (&input_line_pointer) != FAIL);
4545
4546 skip_past_char (&input_line_pointer, '}');
4547
4548 demand_empty_rest_of_line ();
4549
4550 /* Generate any deferred opcodes because we're going to be looking at
4551 the list. */
4552 flush_pending_unwind ();
4553
4554 for (reg = 0; reg < 16; reg++)
4555 {
4556 if (mask & (1 << reg))
4557 unwind.frame_size += 4;
4558 }
4559 op = 0xc700 | mask;
4560 add_unwind_opcode (op, 2);
4561 return;
4562 error:
4563 ignore_rest_of_line ();
4564 }
4565
4566
4567 /* Parse an unwind_save directive.
4568 If the argument is non-zero, this is a .vsave directive. */
4569
4570 static void
4571 s_arm_unwind_save (int arch_v6)
4572 {
4573 char *peek;
4574 struct reg_entry *reg;
4575 bfd_boolean had_brace = FALSE;
4576
4577 if (!unwind.proc_start)
4578 as_bad (MISSING_FNSTART);
4579
4580 /* Figure out what sort of save we have. */
4581 peek = input_line_pointer;
4582
4583 if (*peek == '{')
4584 {
4585 had_brace = TRUE;
4586 peek++;
4587 }
4588
4589 reg = arm_reg_parse_multi (&peek);
4590
4591 if (!reg)
4592 {
4593 as_bad (_("register expected"));
4594 ignore_rest_of_line ();
4595 return;
4596 }
4597
4598 switch (reg->type)
4599 {
4600 case REG_TYPE_FN:
4601 if (had_brace)
4602 {
4603 as_bad (_("FPA .unwind_save does not take a register list"));
4604 ignore_rest_of_line ();
4605 return;
4606 }
4607 input_line_pointer = peek;
4608 s_arm_unwind_save_fpa (reg->number);
4609 return;
4610
4611 case REG_TYPE_RN:
4612 s_arm_unwind_save_core ();
4613 return;
4614
4615 case REG_TYPE_VFD:
4616 if (arch_v6)
4617 s_arm_unwind_save_vfp_armv6 ();
4618 else
4619 s_arm_unwind_save_vfp ();
4620 return;
4621
4622 case REG_TYPE_MMXWR:
4623 s_arm_unwind_save_mmxwr ();
4624 return;
4625
4626 case REG_TYPE_MMXWCG:
4627 s_arm_unwind_save_mmxwcg ();
4628 return;
4629
4630 default:
4631 as_bad (_(".unwind_save does not support this kind of register"));
4632 ignore_rest_of_line ();
4633 }
4634 }
4635
4636
4637 /* Parse an unwind_movsp directive. */
4638
4639 static void
4640 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4641 {
4642 int reg;
4643 valueT op;
4644 int offset;
4645
4646 if (!unwind.proc_start)
4647 as_bad (MISSING_FNSTART);
4648
4649 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4650 if (reg == FAIL)
4651 {
4652 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4653 ignore_rest_of_line ();
4654 return;
4655 }
4656
4657 /* Optional constant. */
4658 if (skip_past_comma (&input_line_pointer) != FAIL)
4659 {
4660 if (immediate_for_directive (&offset) == FAIL)
4661 return;
4662 }
4663 else
4664 offset = 0;
4665
4666 demand_empty_rest_of_line ();
4667
4668 if (reg == REG_SP || reg == REG_PC)
4669 {
4670 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4671 return;
4672 }
4673
4674 if (unwind.fp_reg != REG_SP)
4675 as_bad (_("unexpected .unwind_movsp directive"));
4676
4677 /* Generate opcode to restore the value. */
4678 op = 0x90 | reg;
4679 add_unwind_opcode (op, 1);
4680
4681 /* Record the information for later. */
4682 unwind.fp_reg = reg;
4683 unwind.fp_offset = unwind.frame_size - offset;
4684 unwind.sp_restored = 1;
4685 }
4686
4687 /* Parse an unwind_pad directive. */
4688
4689 static void
4690 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4691 {
4692 int offset;
4693
4694 if (!unwind.proc_start)
4695 as_bad (MISSING_FNSTART);
4696
4697 if (immediate_for_directive (&offset) == FAIL)
4698 return;
4699
4700 if (offset & 3)
4701 {
4702 as_bad (_("stack increment must be multiple of 4"));
4703 ignore_rest_of_line ();
4704 return;
4705 }
4706
4707 /* Don't generate any opcodes, just record the details for later. */
4708 unwind.frame_size += offset;
4709 unwind.pending_offset += offset;
4710
4711 demand_empty_rest_of_line ();
4712 }
4713
4714 /* Parse an unwind_setfp directive. */
4715
4716 static void
4717 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4718 {
4719 int sp_reg;
4720 int fp_reg;
4721 int offset;
4722
4723 if (!unwind.proc_start)
4724 as_bad (MISSING_FNSTART);
4725
4726 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4727 if (skip_past_comma (&input_line_pointer) == FAIL)
4728 sp_reg = FAIL;
4729 else
4730 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4731
4732 if (fp_reg == FAIL || sp_reg == FAIL)
4733 {
4734 as_bad (_("expected <reg>, <reg>"));
4735 ignore_rest_of_line ();
4736 return;
4737 }
4738
4739 /* Optional constant. */
4740 if (skip_past_comma (&input_line_pointer) != FAIL)
4741 {
4742 if (immediate_for_directive (&offset) == FAIL)
4743 return;
4744 }
4745 else
4746 offset = 0;
4747
4748 demand_empty_rest_of_line ();
4749
4750 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4751 {
4752 as_bad (_("register must be either sp or set by a previous"
4753 "unwind_movsp directive"));
4754 return;
4755 }
4756
4757 /* Don't generate any opcodes, just record the information for later. */
4758 unwind.fp_reg = fp_reg;
4759 unwind.fp_used = 1;
4760 if (sp_reg == REG_SP)
4761 unwind.fp_offset = unwind.frame_size - offset;
4762 else
4763 unwind.fp_offset -= offset;
4764 }
4765
4766 /* Parse an unwind_raw directive. */
4767
4768 static void
4769 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4770 {
4771 expressionS exp;
4772 /* This is an arbitrary limit. */
4773 unsigned char op[16];
4774 int count;
4775
4776 if (!unwind.proc_start)
4777 as_bad (MISSING_FNSTART);
4778
4779 expression (&exp);
4780 if (exp.X_op == O_constant
4781 && skip_past_comma (&input_line_pointer) != FAIL)
4782 {
4783 unwind.frame_size += exp.X_add_number;
4784 expression (&exp);
4785 }
4786 else
4787 exp.X_op = O_illegal;
4788
4789 if (exp.X_op != O_constant)
4790 {
4791 as_bad (_("expected <offset>, <opcode>"));
4792 ignore_rest_of_line ();
4793 return;
4794 }
4795
4796 count = 0;
4797
4798 /* Parse the opcode. */
4799 for (;;)
4800 {
4801 if (count >= 16)
4802 {
4803 as_bad (_("unwind opcode too long"));
4804 ignore_rest_of_line ();
4805 }
4806 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4807 {
4808 as_bad (_("invalid unwind opcode"));
4809 ignore_rest_of_line ();
4810 return;
4811 }
4812 op[count++] = exp.X_add_number;
4813
4814 /* Parse the next byte. */
4815 if (skip_past_comma (&input_line_pointer) == FAIL)
4816 break;
4817
4818 expression (&exp);
4819 }
4820
4821 /* Add the opcode bytes in reverse order. */
4822 while (count--)
4823 add_unwind_opcode (op[count], 1);
4824
4825 demand_empty_rest_of_line ();
4826 }
4827
4828
4829 /* Parse a .eabi_attribute directive. */
4830
4831 static void
4832 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4833 {
4834 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4835
4836 if (tag >= 0 && tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4837 attributes_set_explicitly[tag] = 1;
4838 }
4839
4840 /* Emit a tls fix for the symbol. */
4841
4842 static void
4843 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
4844 {
4845 char *p;
4846 expressionS exp;
4847 #ifdef md_flush_pending_output
4848 md_flush_pending_output ();
4849 #endif
4850
4851 #ifdef md_cons_align
4852 md_cons_align (4);
4853 #endif
4854
4855 /* Since we're just labelling the code, there's no need to define a
4856 mapping symbol. */
4857 expression (&exp);
4858 p = obstack_next_free (&frchain_now->frch_obstack);
4859 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
4860 thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4861 : BFD_RELOC_ARM_TLS_DESCSEQ);
4862 }
4863 #endif /* OBJ_ELF */
4864
4865 static void s_arm_arch (int);
4866 static void s_arm_object_arch (int);
4867 static void s_arm_cpu (int);
4868 static void s_arm_fpu (int);
4869 static void s_arm_arch_extension (int);
4870
4871 #ifdef TE_PE
4872
4873 static void
4874 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4875 {
4876 expressionS exp;
4877
4878 do
4879 {
4880 expression (&exp);
4881 if (exp.X_op == O_symbol)
4882 exp.X_op = O_secrel;
4883
4884 emit_expr (&exp, 4);
4885 }
4886 while (*input_line_pointer++ == ',');
4887
4888 input_line_pointer--;
4889 demand_empty_rest_of_line ();
4890 }
4891 #endif /* TE_PE */
4892
4893 /* This table describes all the machine specific pseudo-ops the assembler
4894 has to support. The fields are:
4895 pseudo-op name without dot
4896 function to call to execute this pseudo-op
4897 Integer arg to pass to the function. */
4898
/* The table is terminated by the all-zero entry.  */
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req", s_req, 0 },
  /* Following two are likewise never called.  */
  { "dn", s_dn, 0 },
  { "qn", s_qn, 0 },
  { "unreq", s_unreq, 0 },
  { "bss", s_bss, 0 },
  { "align", s_align_ptwo, 2 },
  { "arm", s_arm, 0 },
  { "thumb", s_thumb, 0 },
  { "code", s_code, 0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func", s_thumb_func, 0 },
  { "thumb_set", s_thumb_set, 0 },
  { "even", s_even, 0 },
  { "ltorg", s_ltorg, 0 },
  { "pool", s_ltorg, 0 },
  { "syntax", s_syntax, 0 },
  { "cpu", s_arm_cpu, 0 },
  { "arch", s_arm_arch, 0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu", s_arm_fpu, 0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word", s_arm_elf_cons, 4 },
  { "long", s_arm_elf_cons, 4 },
  { "inst.n", s_arm_elf_inst, 2 },
  { "inst.w", s_arm_elf_inst, 4 },
  { "inst", s_arm_elf_inst, 0 },
  { "rel31", s_arm_rel31, 0 },
  /* Stack-unwinding directives (handlers defined above).  */
  { "fnstart", s_arm_unwind_fnstart, 0 },
  { "fnend", s_arm_unwind_fnend, 0 },
  { "cantunwind", s_arm_unwind_cantunwind, 0 },
  { "personality", s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata", s_arm_unwind_handlerdata, 0 },
  { "save", s_arm_unwind_save, 0 },
  { "vsave", s_arm_unwind_save, 1 },
  { "movsp", s_arm_unwind_movsp, 0 },
  { "pad", s_arm_unwind_pad, 0 },
  { "setfp", s_arm_unwind_setfp, 0 },
  { "unwind_raw", s_arm_unwind_raw, 0 },
  { "eabi_attribute", s_arm_eabi_attribute, 0 },
  { "tlsdescseq", s_arm_tls_descseq, 0 },
#else
  { "word", cons, 4},

  /* These are used for dwarf. */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2. */
  { "file", dwarf2_directive_file, 0 },
  { "loc", dwarf2_directive_loc, 0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend", float_cons, 'x' },
  { "ldouble", float_cons, 'x' },
  { "packed", float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref", s_ccs_ref, 0},
  {"def", s_ccs_def, 0},
  {"asmfunc", s_ccs_asmfunc, 0},
  {"endasmfunc", s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4972 \f
4973 /* Parser functions used exclusively in instruction operands. */
4974
4975 /* Generic immediate-value read function for use in insn parsing.
4976 STR points to the beginning of the immediate (the leading #);
4977 VAL receives the value; if the value is outside [MIN, MAX]
4978 issue an error. PREFIX_OPT is true if the immediate prefix is
4979 optional. */
4980
4981 static int
4982 parse_immediate (char **str, int *val, int min, int max,
4983 bfd_boolean prefix_opt)
4984 {
4985 expressionS exp;
4986
4987 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4988 if (exp.X_op != O_constant)
4989 {
4990 inst.error = _("constant expression required");
4991 return FAIL;
4992 }
4993
4994 if (exp.X_add_number < min || exp.X_add_number > max)
4995 {
4996 inst.error = _("immediate value out of range");
4997 return FAIL;
4998 }
4999
5000 *val = exp.X_add_number;
5001 return SUCCESS;
5002 }
5003
5004 /* Less-generic immediate-value read function with the possibility of loading a
5005 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
5006 instructions. Puts the result directly in inst.operands[i]. */
5007
static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  /* Parse into the caller-supplied expression when one is given,
     otherwise use the local scratch expression.  */
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      /* Low 32 bits go in .imm; high 32 bits (if present) in .reg.  */
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  /* The littlenums above the low 64 bits must be a pure sign
	     extension: all-zero or all-one, and mutually identical.  */
	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm ...  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      /* ... and the next 32 bits into .reg.  */
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
5076
5077 /* Returns the pseudo-register number of an FPA immediate constant,
5078 or FAIL if there isn't a valid constant here. */
5079
static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char * save_in;
  expressionS exp;
  int i;
  int j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    /* FPA immediate constants are reported as pseudo-register
	       numbers 8 upwards.  */
	    return i + 8;
	  /* The match was only a prefix of the operand; restore and
	     keep looking.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA value.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  /* expression () works on input_line_pointer, so temporarily redirect
     it at *str and restore it afterwards.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
5169
/* Returns nonzero if IMM (viewed as an IEEE single-precision bit
   pattern) has the "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000, i.e. the low 19 bits are zero
   and the exponent's upper bits are a proper sign-extension of bit 29.  */

static int
is_quarter_float (unsigned imm)
{
  unsigned expected_top;

  if (imm & 0x20000000)
    expected_top = 0x3e000000;
  else
    expected_top = 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;
  return (imm & 0x7e000000) == expected_top;
}
5179
5180
5181 /* Detect the presence of a floating point or integer zero constant,
5182 i.e. #0.0 or #0. */
5183
5184 static bfd_boolean
5185 parse_ifimm_zero (char **in)
5186 {
5187 int error_code;
5188
5189 if (!is_immediate_prefix (**in))
5190 {
5191 /* In unified syntax, all prefixes are optional. */
5192 if (!unified_syntax)
5193 return FALSE;
5194 }
5195 else
5196 ++*in;
5197
5198 /* Accept #0x0 as a synonym for #0. */
5199 if (strncmp (*in, "0x", 2) == 0)
5200 {
5201 int val;
5202 if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
5203 return FALSE;
5204 return TRUE;
5205 }
5206
5207 error_code = atof_generic (in, ".", EXP_CHARS,
5208 &generic_floating_point_number);
5209
5210 if (!error_code
5211 && generic_floating_point_number.sign == '+'
5212 && (generic_floating_point_number.low
5213 > generic_floating_point_number.leader))
5214 return TRUE;
5215
5216 return FALSE;
5217 }
5218
5219 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5220 0baBbbbbbc defgh000 00000000 00000000.
5221 The zero and minus-zero cases need special handling, since they can't be
5222 encoded in the "quarter-precision" float format, but can nonetheless be
5223 loaded as integer constants. */
5224
5225 static unsigned
5226 parse_qfloat_immediate (char **ccp, int *immed)
5227 {
5228 char *str = *ccp;
5229 char *fpnum;
5230 LITTLENUM_TYPE words[MAX_LITTLENUMS];
5231 int found_fpchar = 0;
5232
5233 skip_past_char (&str, '#');
5234
5235 /* We must not accidentally parse an integer as a floating-point number. Make
5236 sure that the value we parse is not an integer by checking for special
5237 characters '.' or 'e'.
5238 FIXME: This is a horrible hack, but doing better is tricky because type
5239 information isn't in a very usable state at parse time. */
5240 fpnum = str;
5241 skip_whitespace (fpnum);
5242
5243 if (strncmp (fpnum, "0x", 2) == 0)
5244 return FAIL;
5245 else
5246 {
5247 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
5248 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
5249 {
5250 found_fpchar = 1;
5251 break;
5252 }
5253
5254 if (!found_fpchar)
5255 return FAIL;
5256 }
5257
5258 if ((str = atof_ieee (str, 's', words)) != NULL)
5259 {
5260 unsigned fpword = 0;
5261 int i;
5262
5263 /* Our FP word must be 32 bits (single-precision FP). */
5264 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
5265 {
5266 fpword <<= LITTLENUM_NUMBER_OF_BITS;
5267 fpword |= words[i];
5268 }
5269
5270 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
5271 *immed = fpword;
5272 else
5273 return FAIL;
5274
5275 *ccp = str;
5276
5277 return SUCCESS;
5278 }
5279
5280 return FAIL;
5281 }
5282
/* Shift operands.  RRX is encoded as ROR #0 later on.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a textual shift mnemonic to its kind; looked up via the
   arm_shift_hsh hash table in parse_shift.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  Restricts which shifts a particular
   operand position accepts.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
  SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
  SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
  SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
  SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
};
5304
5305 /* Parse a <shift> specifier on an ARM data processing instruction.
5306 This has three forms:
5307
5308 (LSL|LSR|ASL|ASR|ROR) Rs
5309 (LSL|LSR|ASL|ASR|ROR) #imm
5310 RRX
5311
5312 Note that ASL is assimilated to LSL in the instruction encoding, and
5313 RRX to ROR #0 (which cannot be written as such). */
5314
static int
parse_shift (char **str, int i, enum parse_shift_mode mode)
{
  const struct asm_shift_name *shift_name;
  enum shift_kind shift;
  char *s = *str;
  char *p = s;
  int reg;

  /* Scan over the alphabetic shift mnemonic.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  /* Look the mnemonic up in the pre-built shift-name hash table.  */
  shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
							    p - *str);

  if (shift_name == NULL)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift = shift_name->kind;

  /* Enforce the restriction requested by the caller, if any.  */
  switch (mode)
    {
    case NO_SHIFT_RESTRICT:
    case SHIFT_IMMEDIATE: break;

    case SHIFT_LSL_OR_ASR_IMMEDIATE:
      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
	{
	  inst.error = _("'LSL' or 'ASR' required");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_IMMEDIATE:
      if (shift != SHIFT_LSL)
	{
	  inst.error = _("'LSL' required");
	  return FAIL;
	}
      break;

    case SHIFT_ASR_IMMEDIATE:
      if (shift != SHIFT_ASR)
	{
	  inst.error = _("'ASR' required");
	  return FAIL;
	}
      break;

    default: abort ();
    }

  /* RRX takes no amount; everything else is followed by a register or
     an immediate shift amount.  */
  if (shift != SHIFT_RRX)
    {
      /* Whitespace can appear here if the next thing is a bare digit.  */
      skip_whitespace (p);

      /* A register amount is only legal when no restriction was
	 requested; an immediate amount is left in inst.relocs[0].exp
	 for the encoder to validate.  */
      if (mode == NO_SHIFT_RESTRICT
	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;
	}
      else if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
	return FAIL;
    }
  inst.operands[i].shift_kind = shift;
  inst.operands[i].shifted = 1;
  *str = p;
  return SUCCESS;
}
5395
5396 /* Parse a <shifter_operand> for an ARM data processing instruction:
5397
5398 #<immediate>
5399 #<immediate>, <rotate>
5400 <Rm>
5401 <Rm>, <shift>
5402
5403 where <shift> is defined by parse_shift above, and <rotate> is a
5404 multiple of 2 between 0 and 30. Validation of immediate operands
5405 is deferred to md_apply_fix. */
5406
5407 static int
5408 parse_shifter_operand (char **str, int i)
5409 {
5410 int value;
5411 expressionS exp;
5412
5413 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
5414 {
5415 inst.operands[i].reg = value;
5416 inst.operands[i].isreg = 1;
5417
5418 /* parse_shift will override this if appropriate */
5419 inst.relocs[0].exp.X_op = O_constant;
5420 inst.relocs[0].exp.X_add_number = 0;
5421
5422 if (skip_past_comma (str) == FAIL)
5423 return SUCCESS;
5424
5425 /* Shift operation on register. */
5426 return parse_shift (str, i, NO_SHIFT_RESTRICT);
5427 }
5428
5429 if (my_get_expression (&inst.relocs[0].exp, str, GE_IMM_PREFIX))
5430 return FAIL;
5431
5432 if (skip_past_comma (str) == SUCCESS)
5433 {
5434 /* #x, y -- ie explicit rotation by Y. */
5435 if (my_get_expression (&exp, str, GE_NO_PREFIX))
5436 return FAIL;
5437
5438 if (exp.X_op != O_constant || inst.relocs[0].exp.X_op != O_constant)
5439 {
5440 inst.error = _("constant expression expected");
5441 return FAIL;
5442 }
5443
5444 value = exp.X_add_number;
5445 if (value < 0 || value > 30 || value % 2 != 0)
5446 {
5447 inst.error = _("invalid rotation");
5448 return FAIL;
5449 }
5450 if (inst.relocs[0].exp.X_add_number < 0
5451 || inst.relocs[0].exp.X_add_number > 255)
5452 {
5453 inst.error = _("invalid constant");
5454 return FAIL;
5455 }
5456
5457 /* Encode as specified. */
5458 inst.operands[i].imm = inst.relocs[0].exp.X_add_number | value << 7;
5459 return SUCCESS;
5460 }
5461
5462 inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
5463 inst.relocs[0].pc_rel = 0;
5464 return SUCCESS;
5465 }
5466
5467 /* Group relocation information. Each entry in the table contains the
5468 textual name of the relocation as may appear in assembler source
5469 and must end with a colon.
5470 Along with this textual name are the relocation codes to be used if
5471 the corresponding instruction is an ALU instruction (ADD or SUB only),
5472 an LDR, an LDRS, or an LDC. */
5473
/* One row of group_reloc_table: the relocation code to use for NAME in
   each instruction class.  An entry of 0 means the relocation does not
   exist for that class (callers assert a non-zero code is chosen).  */
struct group_reloc_table_entry
{
  const char *name;
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
5491
/* See find_group_reloc_table_entry for the lookup over this table.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC, /* ALU */
      0, /* LDR */
      0, /* LDRS */
      0 }, /* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0, /* ALU */
      BFD_RELOC_ARM_LDR_PC_G0, /* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0, /* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 }, /* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC, /* ALU */
      0, /* LDR */
      0, /* LDRS */
      0 }, /* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1, /* ALU */
      BFD_RELOC_ARM_LDR_PC_G1, /* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1, /* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 }, /* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2, /* ALU */
      BFD_RELOC_ARM_LDR_PC_G2, /* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2, /* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 }, /* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC, /* ALU */
      0, /* LDR */
      0, /* LDRS */
      0 }, /* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0, /* ALU */
      BFD_RELOC_ARM_LDR_SB_G0, /* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0, /* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 }, /* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC, /* ALU */
      0, /* LDR */
      0, /* LDRS */
      0 }, /* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1, /* ALU */
      BFD_RELOC_ARM_LDR_SB_G1, /* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1, /* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 }, /* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2, /* ALU */
      BFD_RELOC_ARM_LDR_SB_G2, /* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2, /* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 }, /* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0, /* LDR.  */
      0, /* LDRS.  */
      0 }, /* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0, /* LDR.  */
      0, /* LDRS.  */
      0 }, /* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0, /* LDR.  */
      0, /* LDRS.  */
      0 }, /* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0, /* LDR.  */
      0, /* LDRS.  */
      0 } }; /* LDC.  */
5566
5567 /* Given the address of a pointer pointing to the textual name of a group
5568 relocation as may appear in assembler source, attempt to find its details
5569 in group_reloc_table. The pointer will be updated to the character after
5570 the trailing colon. On failure, FAIL will be returned; SUCCESS
5571 otherwise. On success, *entry will be updated to point at the relevant
5572 group_reloc_table entry. */
5573
5574 static int
5575 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5576 {
5577 unsigned int i;
5578 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5579 {
5580 int length = strlen (group_reloc_table[i].name);
5581
5582 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5583 && (*str)[length] == ':')
5584 {
5585 *out = &group_reloc_table[i];
5586 *str += (length + 1);
5587 return SUCCESS;
5588 }
5589 }
5590
5591 return FAIL;
5592 }
5593
5594 /* Parse a <shifter_operand> for an ARM data processing instruction
5595 (as for parse_shifter_operand) where group relocations are allowed:
5596
5597 #<immediate>
5598 #<immediate>, <rotate>
5599 #:<group_reloc>:<expression>
5600 <Rm>
5601 <Rm>, <shift>
5602
5603 where <group_reloc> is one of the strings defined in group_reloc_table.
5604 The hashes are optional.
5605
5606 Everything else is as for parse_shifter_operand. */
5607
5608 static parse_operand_result
5609 parse_shifter_operand_group_reloc (char **str, int i)
5610 {
5611 /* Determine if we have the sequence of characters #: or just :
5612 coming next. If we do, then we check for a group relocation.
5613 If we don't, punt the whole lot to parse_shifter_operand. */
5614
5615 if (((*str)[0] == '#' && (*str)[1] == ':')
5616 || (*str)[0] == ':')
5617 {
5618 struct group_reloc_table_entry *entry;
5619
5620 if ((*str)[0] == '#')
5621 (*str) += 2;
5622 else
5623 (*str)++;
5624
5625 /* Try to parse a group relocation. Anything else is an error. */
5626 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5627 {
5628 inst.error = _("unknown group relocation");
5629 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5630 }
5631
5632 /* We now have the group relocation table entry corresponding to
5633 the name in the assembler source. Next, we parse the expression. */
5634 if (my_get_expression (&inst.relocs[0].exp, str, GE_NO_PREFIX))
5635 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5636
5637 /* Record the relocation type (always the ALU variant here). */
5638 inst.relocs[0].type = (bfd_reloc_code_real_type) entry->alu_code;
5639 gas_assert (inst.relocs[0].type != 0);
5640
5641 return PARSE_OPERAND_SUCCESS;
5642 }
5643 else
5644 return parse_shifter_operand (str, i) == SUCCESS
5645 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5646
5647 /* Never reached. */
5648 }
5649
5650 /* Parse a Neon alignment expression. Information is written to
5651 inst.operands[i]. We assume the initial ':' has been skipped.
5652
5653 align .imm = align << 8, .immisalign=1, .preind=0 */
5654 static parse_operand_result
5655 parse_neon_alignment (char **str, int i)
5656 {
5657 char *p = *str;
5658 expressionS exp;
5659
5660 my_get_expression (&exp, &p, GE_NO_PREFIX);
5661
5662 if (exp.X_op != O_constant)
5663 {
5664 inst.error = _("alignment must be constant");
5665 return PARSE_OPERAND_FAIL;
5666 }
5667
5668 inst.operands[i].imm = exp.X_add_number << 8;
5669 inst.operands[i].immisalign = 1;
5670 /* Alignments are not pre-indexes. */
5671 inst.operands[i].preind = 0;
5672
5673 *str = p;
5674 return PARSE_OPERAND_SUCCESS;
5675 }
5676
5677 /* Parse all forms of an ARM address expression. Information is written
5678 to inst.operands[i] and/or inst.relocs[0].
5679
5680 Preindexed addressing (.preind=1):
5681
5682 [Rn, #offset] .reg=Rn .relocs[0].exp=offset
5683 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5684 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5685 .shift_kind=shift .relocs[0].exp=shift_imm
5686
5687 These three may have a trailing ! which causes .writeback to be set also.
5688
5689 Postindexed addressing (.postind=1, .writeback=1):
5690
5691 [Rn], #offset .reg=Rn .relocs[0].exp=offset
5692 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5693 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5694 .shift_kind=shift .relocs[0].exp=shift_imm
5695
5696 Unindexed addressing (.preind=0, .postind=0):
5697
5698 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5699
5700 Other:
5701
5702 [Rn]{!} shorthand for [Rn,#0]{!}
5703 =immediate .isreg=0 .relocs[0].exp=immediate
5704 label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5705
5706 It is the caller's responsibility to check for addressing modes not
5707 supported by the instruction, and to set inst.relocs[0].type. */
5708
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      /* No '[': either "=immediate" or a bare expression (label).  */
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.relocs[0].pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.relocs[0].exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.relocs[0].exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* [Rn, ... : pre-indexed addressing.  */
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [Rn, +/-Rm ... : register offset, with optional shift.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  if (inst.operands[i].negative)
	    {
	      /* The '-' was not followed by a register; back up one char so
		 the expression parser sees the sign as part of the
		 immediate.  */
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  Which of the three codes is
		 used depends on the instruction class the caller passed.  */
	      switch (group_type)
		{
		case GROUP_LDR:
		  inst.relocs[0].type
		    = (bfd_reloc_code_real_type) entry->ldr_code;
		  break;

		case GROUP_LDRS:
		  inst.relocs[0].type
		    = (bfd_reloc_code_real_type) entry->ldrs_code;
		  break;

		case GROUP_LDC:
		  inst.relocs[0].type
		    = (bfd_reloc_code_real_type) entry->ldc_code;
		  break;

		default:
		  gas_assert (0);
		}

	      /* A zero code in the table means this group relocation is not
		 valid for this instruction class.  */
	      if (inst.relocs[0].type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;

	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* [Rn], ... : post-indexed addressing, which always writes back.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;

	      if (inst.operands[i].negative)
		{
		  /* The '-' was not followed by a register; back up one char
		     so the expression parser sees the sign as part of the
		     immediate.  */
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5965
5966 static int
5967 parse_address (char **str, int i)
5968 {
5969 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5970 ? SUCCESS : FAIL;
5971 }
5972
/* Parse an ARM address expression, additionally allowing group relocations
   of class TYPE (LDR/LDRS/LDC) in the offset field.  Thin wrapper around
   parse_address_main.  */
static parse_operand_result
parse_address_group_reloc (char **str, int i, group_reloc_type type)
{
  return parse_address_main (str, i, 1, type);
}
5978
5979 /* Parse an operand for a MOVW or MOVT instruction. */
5980 static int
5981 parse_half (char **str)
5982 {
5983 char * p;
5984
5985 p = *str;
5986 skip_past_char (&p, '#');
5987 if (strncasecmp (p, ":lower16:", 9) == 0)
5988 inst.relocs[0].type = BFD_RELOC_ARM_MOVW;
5989 else if (strncasecmp (p, ":upper16:", 9) == 0)
5990 inst.relocs[0].type = BFD_RELOC_ARM_MOVT;
5991
5992 if (inst.relocs[0].type != BFD_RELOC_UNUSED)
5993 {
5994 p += 9;
5995 skip_whitespace (p);
5996 }
5997
5998 if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
5999 return FAIL;
6000
6001 if (inst.relocs[0].type == BFD_RELOC_UNUSED)
6002 {
6003 if (inst.relocs[0].exp.X_op != O_constant)
6004 {
6005 inst.error = _("constant expression expected");
6006 return FAIL;
6007 }
6008 if (inst.relocs[0].exp.X_add_number < 0
6009 || inst.relocs[0].exp.X_add_number > 0xffff)
6010 {
6011 inst.error = _("immediate value out of range");
6012 return FAIL;
6013 }
6014 }
6015 *str = p;
6016 return SUCCESS;
6017 }
6018
6019 /* Miscellaneous. */
6020
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when parsing the destination of an MSR (a write).  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile special registers are looked up by name in a hash table.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For names ending in "psr", cut the candidate off just after the
	 'r'/'R' so any suffix is not included in the hash lookup.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the 4-character register name matched above.  */
  p += 4;
check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* Each letter sets its own bit; bit 0x20 is used as a "duplicate
	     seen" marker and triggers the bad-bitmask error below.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v, q together select the flags field.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicates, a partial nzcvq set, or a repeated 'g'.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* Non-APSR: look the suffix up as a named field.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;  /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6217
6218 static int
6219 parse_sys_vldr_vstr (char **str)
6220 {
6221 unsigned i;
6222 int val = FAIL;
6223 struct {
6224 const char *name;
6225 int regl;
6226 int regh;
6227 } sysregs[] = {
6228 {"FPSCR", 0x1, 0x0},
6229 {"FPSCR_nzcvqc", 0x2, 0x0},
6230 {"VPR", 0x4, 0x1},
6231 {"P0", 0x5, 0x1},
6232 {"FPCXTNS", 0x6, 0x1},
6233 {"FPCXTS", 0x7, 0x1}
6234 };
6235 char *op_end = strchr (*str, ',');
6236 size_t op_strlen = op_end - *str;
6237
6238 for (i = 0; i < sizeof (sysregs) / sizeof (sysregs[0]); i++)
6239 {
6240 if (!strncmp (*str, sysregs[i].name, op_strlen))
6241 {
6242 val = sysregs[i].regl | (sysregs[i].regh << 3);
6243 *str = op_end;
6244 break;
6245 }
6246 }
6247
6248 return val;
6249 }
6250
6251 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6252 value suitable for splatting into the AIF field of the instruction. */
6253
6254 static int
6255 parse_cps_flags (char **str)
6256 {
6257 int val = 0;
6258 int saw_a_flag = 0;
6259 char *s = *str;
6260
6261 for (;;)
6262 switch (*s++)
6263 {
6264 case '\0': case ',':
6265 goto done;
6266
6267 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6268 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6269 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6270
6271 default:
6272 inst.error = _("unrecognized CPS flag");
6273 return FAIL;
6274 }
6275
6276 done:
6277 if (saw_a_flag == 0)
6278 {
6279 inst.error = _("missing CPS flags");
6280 return FAIL;
6281 }
6282
6283 *str = s - 1;
6284 return val;
6285 }
6286
6287 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6288 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6289
6290 static int
6291 parse_endian_specifier (char **str)
6292 {
6293 int little_endian;
6294 char *s = *str;
6295
6296 if (strncasecmp (s, "BE", 2))
6297 little_endian = 0;
6298 else if (strncasecmp (s, "LE", 2))
6299 little_endian = 1;
6300 else
6301 {
6302 inst.error = _("valid endian specifiers are be or le");
6303 return FAIL;
6304 }
6305
6306 if (ISALNUM (s[2]) || s[2] == '_')
6307 {
6308 inst.error = _("valid endian specifiers are be or le");
6309 return FAIL;
6310 }
6311
6312 *str = s + 2;
6313 return little_endian;
6314 }
6315
6316 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6317 value suitable for poking into the rotate field of an sxt or sxta
6318 instruction, or FAIL on error. */
6319
6320 static int
6321 parse_ror (char **str)
6322 {
6323 int rot;
6324 char *s = *str;
6325
6326 if (strncasecmp (s, "ROR", 3) == 0)
6327 s += 3;
6328 else
6329 {
6330 inst.error = _("missing rotation field after comma");
6331 return FAIL;
6332 }
6333
6334 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6335 return FAIL;
6336
6337 switch (rot)
6338 {
6339 case 0: *str = s; return 0x0;
6340 case 8: *str = s; return 0x1;
6341 case 16: *str = s; return 0x2;
6342 case 24: *str = s; return 0x3;
6343
6344 default:
6345 inst.error = _("rotation can only be 0, 8, 16, or 24");
6346 return FAIL;
6347 }
6348 }
6349
6350 /* Parse a conditional code (from conds[] below). The value returned is in the
6351 range 0 .. 14, or FAIL. */
6352 static int
6353 parse_cond (char **str)
6354 {
6355 char *q;
6356 const struct asm_cond *c;
6357 int n;
6358 /* Condition codes are always 2 characters, so matching up to
6359 3 characters is sufficient. */
6360 char cond[3];
6361
6362 q = *str;
6363 n = 0;
6364 while (ISALPHA (*q) && n < 3)
6365 {
6366 cond[n] = TOLOWER (*q);
6367 q++;
6368 n++;
6369 }
6370
6371 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6372 if (!c)
6373 {
6374 inst.error = _("condition required");
6375 return FAIL;
6376 }
6377
6378 *str = q;
6379 return c->value;
6380 }
6381
6382 /* Parse an option for a barrier instruction. Returns the encoding for the
6383 option, or FAIL. */
6384 static int
6385 parse_barrier (char **str)
6386 {
6387 char *p, *q;
6388 const struct asm_barrier_opt *o;
6389
6390 p = q = *str;
6391 while (ISALPHA (*q))
6392 q++;
6393
6394 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6395 q - p);
6396 if (!o)
6397 return FAIL;
6398
6399 if (!mark_feature_used (&o->arch))
6400 return FAIL;
6401
6402 *str = q;
6403 return o->value;
6404 }
6405
/* Parse the operands of a table branch instruction.  Similar to a memory
   operand.  Fills in inst.operands[0]: .reg is the base register, .imm the
   index register, and .shifted is set when an LSL #1 was given.  */
static int
parse_tb (char **str)
{
  char * p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      inst.error = _("'[' expected");
      return FAIL;
    }

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  /* Base register.  */
  inst.operands[0].reg = reg;

  if (skip_past_comma (&p) == FAIL)
    {
      inst.error = _("',' expected");
      return FAIL;
    }

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  /* Index register.  */
  inst.operands[0].imm = reg;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* An optional shift: only LSL with an immediate of exactly 1 is
	 accepted.  */
      if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
	return FAIL;
      if (inst.relocs[0].exp.X_add_number != 1)
	{
	  inst.error = _("invalid shift");
	  return FAIL;
	}
      inst.operands[0].shifted = 1;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return FAIL;
    }
  *str = p;
  return SUCCESS;
}
6460
/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  /* I indexes the operand currently being filled in; it advances as
     operands are consumed.  */
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  First operand is an S, D or Q
	 register; disambiguation happens on the second operand below.  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* A D register needs a second core register (case 5).  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two more core registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	/* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	   Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	   Case 10: VMOV.F32 <Sd>, #<imm>
	   Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	/* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	   Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is a core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second S register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

  /* Shared error exits; first_error keeps only the first message.  */
 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6683
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM code goes in the low
   16 bits and the Thumb code in the high 16 bits.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  NOTE: the order of enumerators is
   significant — all optional operand codes are grouped at the end, starting
   at OP_FIRST_OPTIONAL (see its definition below).  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNDMQ,     /* Neon double precision (0..31) or MVE vector register.  */
  OP_RNDMQR,    /* Neon double precision (0..31), MVE vector or ARM register.
		 */
  OP_RNQ,	/* Neon quad precision register */
  OP_RNQMQ,	/* Neon quad or MVE vector register.  */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNSD,      /* Neon single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNDQMQ,     /* Neon double, quad or MVE vector register.  */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_RNSDQMQ,	/* Neon single, double or quad register or MVE vector register
		 */
  OP_RNSDQMQR,	/* Neon single, double or quad register, MVE vector register or
		   GPR (no SP/SP) */
  OP_RMQ,	/* MVE vector register.  */

  /* New operands for Armv8.1-M Mainline.  */
  OP_LR,	/* ARM LR register */
  OP_RRe,	/* ARM register, only even numbered.  */
  OP_RRo,	/* ARM register, only odd numbered, not r13 or r15.  */
  OP_RRnpcsp_I32, /* ARM register (no BadReg) or literal 1 .. 32 */

  OP_REGLST,	/* ARM register list */
  OP_CLRMLST,	/* CLRM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */
  OP_VRSDVLST,  /* VFP single or double-precision register list and VPR */

  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNSDQ_RNSC_MQ, /* Vector S, D or Q reg, Neon scalar or MVE vector register.
		     */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
  OP_VLDR,	/* VLDR operand.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_EXPs,	/* same, with optional non-first operand relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oI32z,      /*                             0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oLR,	 /* ARM LR register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQMQ,     /* Optional Neon double, quad or MVE vector register.  */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oRNSDQMQ,	 /* Optional single, double or quad register or MVE vector
		    register.  */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* All OP_o* codes above start at this value; codes at or after it are
     treated as optional operands.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
6850
/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.

   STR is the operand part of the instruction text.  PATTERN is the
   array of OP_* operand codes terminated by OP_stop; entries at or
   above 1<<16 pack an ARM code in the low half and a Thumb code in
   the high half, selected by THUMB.  Optional operands (those at or
   above OP_FIRST_OPTIONAL) may be skipped by backtracking.  */
static int
parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
{
  unsigned const int *upat = pattern;
  char *backtrack_pos = 0;
  const char *backtrack_error = 0;
  int i, val = 0, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;
  unsigned int op_parse_code;
  bfd_boolean partial_match;

/* Consume the single character CHR or jump to the generic
   bad-arguments diagnostic.  */
#define po_char_or_fail(chr)			\
  do						\
    {						\
      if (skip_past_char (&str, chr) == FAIL)	\
	goto bad_args;				\
    }						\
  while (0)

/* Parse a register of type REGTYPE into operand I, or fail the whole
   parse with a "<regtype> expected" error.  */
#define po_reg_or_fail(regtype)					\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	{							\
	  first_error (_(reg_expected_msgs[regtype]));		\
	  goto failure;						\
	}							\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\
    }								\
  while (0)

/* As po_reg_or_fail, but on mismatch jump to LABEL so an alternative
   grammar for the operand can be tried.  */
#define po_reg_or_goto(regtype, label)				\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	goto label;						\
								\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\
    }								\
  while (0)

/* Parse an immediate in [MIN, MAX] (POPT: '#' prefix optional) into
   operand I, or fail the parse.  */
#define po_imm_or_fail(min, max, popt)				\
  do								\
    {								\
      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
	goto failure;						\
      inst.operands[i].imm = val;				\
    }								\
  while (0)

/* Parse a Neon scalar (Dn[x]) of element size ELSZ into operand I, or
   jump to LABEL to try another grammar.  */
#define po_scalar_or_goto(elsz, label)					\
  do									\
    {									\
      val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
      if (val == FAIL)							\
	goto label;							\
      inst.operands[i].reg = val;					\
      inst.operands[i].isscalar = 1;					\
    }									\
  while (0)

/* Fail the parse if EXPR is true.  */
#define po_misc_or_fail(expr)			\
  do						\
    {						\
      if (expr)					\
	goto failure;				\
    }						\
  while (0)

/* As po_misc_or_fail, but a NO_BACKTRACK result also disables
   optional-operand backtracking (the error must be reported as-is).  */
#define po_misc_or_fail_no_backtrack(expr)		\
  do							\
    {							\
      result = expr;					\
      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
	backtrack_pos = 0;				\
      if (result != PARSE_OPERAND_SUCCESS)		\
	goto failure;					\
    }							\
  while (0)

/* Parse a barrier option name; fall back to the numeric-immediate
   grammar when the text is not alphabetic.  */
#define po_barrier_or_imm(str)				   \
  do							   \
    {							   \
      val = parse_barrier (&str);			   \
      if (val == FAIL && ! ISALPHA (*str))		   \
	goto immediate;					   \
      if (val == FAIL					   \
	  /* ISB can only take SY as an option.  */	   \
	  || ((inst.instruction & 0xf0) == 0x60		   \
	      && val != 0xf))				   \
	{						   \
	  inst.error = _("invalid barrier type");	   \
	  backtrack_pos = 0;				   \
	  goto failure;					   \
	}						   \
    }							   \
  while (0)

  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
    {
      op_parse_code = upat[i];
      /* Packed entries carry the Thumb code in the high half-word and
	 the ARM code in the low half-word.  */
      if (op_parse_code >= 1<<16)
	op_parse_code = thumb ? (op_parse_code >> 16)
			      : (op_parse_code & ((1<<16)-1));

      if (op_parse_code >= OP_FIRST_OPTIONAL)
	{
	  /* Remember where we are in case we need to backtrack.  */
	  gas_assert (!backtrack_pos);
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;
	}

      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

      switch (op_parse_code)
	{
	  /* Registers */
	case OP_oRRnpc:
	case OP_oRRnpcsp:
	case OP_RRnpc:
	case OP_RRnpcsp:
	case OP_oRR:
	case OP_RRe:
	case OP_RRo:
	case OP_LR:
	case OP_oLR:
	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_oRND:
	case OP_RNDMQR:
	  po_reg_or_goto (REG_TYPE_RN, try_rndmq);
	  break;
	try_rndmq:
	case OP_RNDMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_rnd);
	  break;
	try_rnd:
	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RVC:
	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
	  break;
	  /* Also accept generic coprocessor regs for unknown registers.  */
	coproc_reg:
	  po_reg_or_fail (REG_TYPE_CN);
	  break;
	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
	case OP_oRNQ:
	case OP_RNQMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_nq);
	  break;
	try_nq:
	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);	  break;
	case OP_RNSD:  po_reg_or_fail (REG_TYPE_NSD);	  break;
	case OP_oRNDQMQ:
	case OP_RNDQMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_rndq);
	  break;
	try_rndq:
	case OP_oRNDQ:
	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);	  break;
	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);	  break;
	case OP_oRNSDQ:
	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);	  break;
	case OP_RNSDQMQR:
	  po_reg_or_goto (REG_TYPE_RN, try_mq);
	  break;
	try_mq:
	case OP_oRNSDQMQ:
	case OP_RNSDQMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_nsdq2);
	  break;
	try_nsdq2:
	  po_reg_or_fail (REG_TYPE_NSDQ);
	  inst.error = 0;
	  break;
	case OP_RMQ:
	  po_reg_or_fail (REG_TYPE_MQ);
	  break;
	  /* Neon scalar. Using an element size of 8 means that some invalid
	     scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC:  po_scalar_or_goto (8, failure);    break;

	case OP_RNDQ_I0:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
	    break;
	    try_imm0:
	    po_imm_or_fail (0, 0, TRUE);
	  }
	  break;

	case OP_RVSD_I0:
	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
	  break;

	case OP_RSVD_FI0:
	  {
	    po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
	    break;
	    try_ifimm0:
	    if (parse_ifimm_zero (&str))
	      inst.operands[i].imm = 0;
	    else
	      {
		inst.error
		  = _("only floating point zero is allowed as immediate value");
		goto failure;
	      }
	  }
	  break;

	case OP_RR_RNSC:
	  {
	    po_scalar_or_goto (8, try_rr);
	    break;
	    try_rr:
	    po_reg_or_fail (REG_TYPE_RN);
	  }
	  break;

	case OP_RNSDQ_RNSC_MQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_rnsdq_rnsc);
	  break;
	try_rnsdq_rnsc:
	case OP_RNSDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_nsdq);
	    break;
	    try_nsdq:
	    po_reg_or_fail (REG_TYPE_NSDQ);
	  }
	  break;

	case OP_RNSD_RNSC:
	  {
	    po_scalar_or_goto (8, try_s_scalar);
	    break;
	    try_s_scalar:
	    po_scalar_or_goto (4, try_nsd);
	    break;
	    try_nsd:
	    po_reg_or_fail (REG_TYPE_NSD);
	  }
	  break;

	case OP_RNDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_ndq);
	    break;
	    try_ndq:
	    po_reg_or_fail (REG_TYPE_NDQ);
	  }
	  break;

	case OP_RND_RNSC:
	  {
	    po_scalar_or_goto (8, try_vfd);
	    break;
	    try_vfd:
	    po_reg_or_fail (REG_TYPE_VFD);
	  }
	  break;

	case OP_VMOV:
	  /* WARNING: parse_neon_mov can move the operand counter, i. If we're
	     not careful then bad things might happen.  */
	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
	  break;

	case OP_RNDQ_Ibig:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
	    break;
	    try_immbig:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
		== FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I63b:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
	    break;
	    try_shimm:
	    po_imm_or_fail (0, 63, TRUE);
	  }
	  break;

	case OP_RRnpcb:
	  po_char_or_fail ('[');
	  po_reg_or_fail  (REG_TYPE_RN);
	  po_char_or_fail (']');
	  break;

	case OP_RRnpctw:
	case OP_RRw:
	case OP_oRRw:
	  po_reg_or_fail (REG_TYPE_RN);
	  if (skip_past_char (&str, '!') == SUCCESS)
	    inst.operands[i].writeback = 1;
	  break;

	  /* Immediates */
	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
	case OP_I16z:	 po_imm_or_fail (  0,	  16, FALSE);	break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
	case OP_I32z:	 po_imm_or_fail (  0,	  32, FALSE);	break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
	case OP_I63:	 po_imm_or_fail (  0,	  63, FALSE);	break;
	case OP_I64:	 po_imm_or_fail (  1,	  64, FALSE);	break;
	case OP_I64z:	 po_imm_or_fail (  0,	  64, FALSE);	break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;

	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
	case OP_oI7b:
	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
	case OP_oI31b:
	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
	case OP_oI32b:	 po_imm_or_fail (  1,	  32, TRUE);	break;
	case OP_oI32z:	 po_imm_or_fail (  0,	  32, TRUE);	break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;

	  /* Immediate variants */
	case OP_oI255c:
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, TRUE);
	  po_char_or_fail ('}');
	  break;

	case OP_I31w:
	  /* The expression parser chokes on a trailing !, so we have
	     to find it first and zap it.  */
	  {
	    char *s = str;
	    while (*s && *s != ',')
	      s++;
	    if (s[-1] == '!')
	      {
		s[-1] = '\0';
		inst.operands[i].writeback = 1;
	      }
	    po_imm_or_fail (0, 31, TRUE);
	    if (str == s - 1)
	      str = s;
	  }
	  break;

	  /* Expressions */
	case OP_EXPi:	EXPi:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_OPT_PREFIX));
	  break;

	case OP_EXP:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_NO_PREFIX));
	  break;

	case OP_EXPr:	EXPr:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_NO_PREFIX));
	  if (inst.relocs[0].exp.X_op == O_symbol)
	    {
	      val = parse_reloc (&str);
	      if (val == -1)
		{
		  inst.error = _("unrecognized relocation suffix");
		  goto failure;
		}
	      else if (val != BFD_RELOC_UNUSED)
		{
		  inst.operands[i].imm = val;
		  inst.operands[i].hasreloc = 1;
		}
	    }
	  break;

	case OP_EXPs:
	  po_misc_or_fail (my_get_expression (&inst.relocs[i].exp, &str,
					      GE_NO_PREFIX));
	  if (inst.relocs[i].exp.X_op == O_symbol)
	    {
	      inst.operands[i].hasreloc = 1;
	    }
	  else if (inst.relocs[i].exp.X_op == O_constant)
	    {
	      inst.operands[i].imm = inst.relocs[i].exp.X_add_number;
	      inst.operands[i].hasreloc = 0;
	    }
	  break;

	  /* Operand for MOVW or MOVT.  */
	case OP_HALF:
	  po_misc_or_fail (parse_half (&str));
	  break;

	  /* Register or expression.  */
	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;

	  /* Register or immediate.  */
	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;

	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
	IF:
	  if (!is_immediate_prefix (*str))
	    goto bad_args;
	  str++;
	  val = parse_fpa_immediate (&str);
	  if (val == FAIL)
	    goto failure;
	  /* FPA immediates are encoded as registers 8-15.
	     parse_fpa_immediate has already applied the offset.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  break;

	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;

	  /* Two kinds of register.  */
	case OP_RIWR_RIWC:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWR
		    && rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt data or control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
	  }
	  break;

	case OP_RIWC_RIWG:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = 1;
	  }
	  break;

	  /* Misc */
	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
	case OP_oROR:	 val = parse_ror (&str);		break;
	case OP_COND:	 val = parse_cond (&str);		break;
	case OP_oBARRIER_I15:
	  po_barrier_or_imm (str); break;
	  immediate:
	  if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
	    goto failure;
	  break;

	case OP_wPSR:
	case OP_rPSR:
	  po_reg_or_goto (REG_TYPE_RNB, try_psr);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
	    {
	      inst.error = _("Banked registers are not available with this "
			     "architecture.");
	      goto failure;
	    }
	  break;
	  try_psr:
	  val = parse_psr (&str, op_parse_code == OP_wPSR);
	  break;

	case OP_VLDR:
	  po_reg_or_goto (REG_TYPE_VFSD, try_sysreg);
	  break;
	  try_sysreg:
	  val = parse_sys_vldr_vstr (&str);
	  break;

	case OP_APSR_RR:
	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
	  break;
	  try_apsr:
	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
	     instruction).  */
	  if (strncasecmp (str, "APSR_", 5) == 0)
	    {
	      /* Accept each of c/n/z/v exactly once, in any order;
		 FOUND becomes 16 (invalid) on a repeat or unknown
		 letter, 15 when all four flags have been seen.  */
	      unsigned found = 0;
	      str += 5;
	      while (found < 15)
		switch (*str++)
		  {
		  case 'c': found = (found & 1) ? 16 : found | 1; break;
		  case 'n': found = (found & 2) ? 16 : found | 2; break;
		  case 'z': found = (found & 4) ? 16 : found | 4; break;
		  case 'v': found = (found & 8) ? 16 : found | 8; break;
		  default: found = 16;
		  }
	      if (found != 15)
		goto failure;
	      inst.operands[i].isvec = 1;
	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
	      inst.operands[i].reg = REG_PC;
	    }
	  else
	    goto failure;
	  break;

	case OP_TB:
	  po_misc_or_fail (parse_tb (&str));
	  break;

	  /* Register lists.  */
	case OP_REGLST:
	  val = parse_reg_list (&str, REGLIST_RN);
	  if (*str == '^')
	    {
	      inst.operands[i].writeback = 1;
	      str++;
	    }
	  break;

	case OP_CLRMLST:
	  val = parse_reg_list (&str, REGLIST_CLRM);
	  break;

	case OP_VRSLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S,
				    &partial_match);
	  break;

	case OP_VRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D,
				    &partial_match);
	  break;

	case OP_VRSDLST:
	  /* Allow Q registers too.  */
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D, &partial_match);
	  if (val == FAIL)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S, &partial_match);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_VRSDVLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_VFP_D_VPR, &partial_match);
	  if (val == FAIL && !partial_match)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S_VPR, &partial_match);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_NRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D, &partial_match);
	  break;

	case OP_NSTRLST:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   &inst.operands[i].vectype);
	  break;

	  /* Addressing modes */
	case OP_ADDR:
	  po_misc_or_fail (parse_address (&str, i));
	  break;

	case OP_ADDRGLDR:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDR));
	  break;

	case OP_ADDRGLDRS:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDRS));
	  break;

	case OP_ADDRGLDC:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDC));
	  break;

	case OP_SH:
	  po_misc_or_fail (parse_shifter_operand (&str, i));
	  break;

	case OP_SHG:
	  po_misc_or_fail_no_backtrack (
	    parse_shifter_operand_group_reloc (&str, i));
	  break;

	case OP_oSHll:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
	  break;

	case OP_oSHar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
	  break;

	case OP_oSHllar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
	  break;

	default:
	  as_fatal (_("unhandled operand code %d"), op_parse_code);
	}

      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.  */
      switch (op_parse_code)
	{
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_RRnpcb:
	case OP_RRw:
	case OP_oRRw:
	case OP_RRnpc_I0:
	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;
	  break;

	case OP_oRRnpcsp:
	case OP_RRnpcsp:
	  if (inst.operands[i].isreg)
	    {
	      if (inst.operands[i].reg == REG_PC)
		inst.error = BAD_PC;
	      else if (inst.operands[i].reg == REG_SP
		       /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
			  relaxed since ARMv8-A.  */
		       && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		{
		  gas_assert (thumb);
		  inst.error = BAD_SP;
		}
	    }
	  break;

	case OP_RRnpctw:
	  if (inst.operands[i].isreg
	      && inst.operands[i].reg == REG_PC
	      && (inst.operands[i].writeback || thumb))
	    inst.error = BAD_PC;
	  break;

	case OP_VLDR:
	  if (inst.operands[i].isreg)
	    break;
	/* fall through.  */
	case OP_CPSF:
	case OP_ENDI:
	case OP_oROR:
	case OP_wPSR:
	case OP_rPSR:
	case OP_COND:
	case OP_oBARRIER_I15:
	case OP_REGLST:
	case OP_CLRMLST:
	case OP_VRSLST:
	case OP_VRDLST:
	case OP_VRSDLST:
	case OP_VRSDVLST:
	case OP_NRDLST:
	case OP_NSTRLST:
	  if (val == FAIL)
	    goto failure;
	  inst.operands[i].imm = val;
	  break;

	case OP_LR:
	case OP_oLR:
	  if (inst.operands[i].reg != REG_LR)
	    inst.error = _("operand must be LR register");
	  break;

	case OP_RRe:
	  if (inst.operands[i].isreg
	      && (inst.operands[i].reg & 0x00000001) != 0)
	    inst.error = BAD_ODD;
	  break;

	case OP_RRo:
	  if (inst.operands[i].isreg)
	    {
	      if ((inst.operands[i].reg & 0x00000001) != 1)
		inst.error = BAD_EVEN;
	      else if (inst.operands[i].reg == REG_SP)
		as_tsktsk (MVE_BAD_SP);
	      else if (inst.operands[i].reg == REG_PC)
		inst.error = BAD_PC;
	    }
	  break;

	default:
	  break;
	}

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;
      continue;

    bad_args:
      inst.error = BAD_ARGS;

    failure:
      if (!backtrack_pos)
	{
	  /* The parse routine should already have set inst.error, but set a
	     default here just in case.  */
	  if (!inst.error)
	    inst.error = BAD_SYNTAX;
	  return FAIL;
	}

      /* Do not backtrack over a trailing optional argument that
	 absorbed some text.  We will only fail again, with the
	 'garbage following instruction' error message, which is
	 probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
	  && upat[i+1] == OP_stop)
	{
	  if (!inst.error)
	    inst.error = BAD_SYNTAX;
	  return FAIL;
	}

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;
      backtrack_pos = 0;
    }

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;
}
7657
/* The operand-parsing helper macros are only meaningful inside
   parse_operands; tear them down here.  Note: the helper is named
   po_scalar_or_goto (there is no po_scalar_or_fail), so undef the
   name that was actually defined.  */
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_goto
#undef po_barrier_or_imm
7664
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR in inst.error and return from the
   (void-returning) enclosing encoder.  EXPR is evaluated exactly
   once.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7676
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.

   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
   places, while the restriction on REG_SP was relaxed since ARMv8-A.

   On rejection this sets inst.error and returns from the enclosing
   (void-returning) encode function.  */
#define reject_bad_reg(reg)					\
  do								\
   if (reg == REG_PC)						\
     {								\
       inst.error = BAD_PC;					\
       return;							\
     }								\
   else if (reg == REG_SP					\
	    && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))	\
     {								\
       inst.error = BAD_SP;					\
       return;							\
     }								\
  while (0)
7697
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  The warning is only emitted when -mwarn-deprecated
   diagnostics are enabled (warn_on_deprecated); it never rejects
   the instruction.  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
       as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
7705
/* Functions for operand encoding.  ARM, then Thumb.  */

/* Rotate the 32-bit value V left by N bits.  Both shift counts are
   masked with 31 so that N == 0 (or any multiple of 32) does not
   produce a shift by 32, which would be undefined behaviour in C.  */
#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7709
/* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.

   The only binary encoding difference is the Coprocessor number.  Coprocessor
   9 is used for half-precision calculations or conversions.  The format of the
   instruction is the same as the equivalent Coprocessor 10 instruction that
   exists for Single-Precision operation.  */

static void
do_scalar_fp16_v82_encode (void)
{
  /* Conditional execution is UNPREDICTABLE for these instructions;
     warn but carry on assembling what was written.  */
  if (inst.cond < COND_ALWAYS)
    as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
	       " the behaviour is UNPREDICTABLE"));
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
	      _(BAD_FP16));

  /* Clear the coprocessor-number field (bits 11:8) and set it to 9.  */
  inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
  mark_feature_used (&arm_ext_fp16);
}
7729
7730 /* If VAL can be encoded in the immediate field of an ARM instruction,
7731 return the encoded form. Otherwise, return FAIL. */
7732
7733 static unsigned int
7734 encode_arm_immediate (unsigned int val)
7735 {
7736 unsigned int a, i;
7737
7738 if (val <= 0xff)
7739 return val;
7740
7741 for (i = 2; i < 32; i += 2)
7742 if ((a = rotate_left (val, i)) <= 0xff)
7743 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7744
7745 return FAIL;
7746 }
7747
7748 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7749 return the encoded form. Otherwise, return FAIL. */
7750 static unsigned int
7751 encode_thumb32_immediate (unsigned int val)
7752 {
7753 unsigned int a, i;
7754
7755 if (val <= 0xff)
7756 return val;
7757
7758 for (i = 1; i <= 24; i++)
7759 {
7760 a = val >> i;
7761 if ((val & ~(0xff << i)) == 0)
7762 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7763 }
7764
7765 a = val & 0xff;
7766 if (val == ((a << 16) | a))
7767 return 0x100 | a;
7768 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7769 return 0x300 | a;
7770
7771 a = val & 0xff00;
7772 if (val == ((a << 16) | a))
7773 return 0x200 | (a >> 8);
7774
7775 return FAIL;
7776 }
/* Encode a VFP SP or DP register number REG into inst.instruction at
   the field selected by POS (the Sd/Sn/Sm or Dd/Dn/Dm slot).  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D registers 16-31 require the D32 extension: record its use, or
     report an error when the selected FPU does not provide it.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* An S register splits as reg[4:1] in the 4-bit field plus reg[0]
     in a separate odd bit; a D register splits as reg[3:0] plus
     reg[4].  The bit positions depend on which operand slot POS
     names.  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
7831
7832 /* Encode a <shift> in an ARM-format instruction. The immediate,
7833 if any, is handled by md_apply_fix. */
7834 static void
7835 encode_arm_shift (int i)
7836 {
7837 /* register-shifted register. */
7838 if (inst.operands[i].immisreg)
7839 {
7840 int op_index;
7841 for (op_index = 0; op_index <= i; ++op_index)
7842 {
7843 /* Check the operand only when it's presented. In pre-UAL syntax,
7844 if the destination register is the same as the first operand, two
7845 register form of the instruction can be used. */
7846 if (inst.operands[op_index].present && inst.operands[op_index].isreg
7847 && inst.operands[op_index].reg == REG_PC)
7848 as_warn (UNPRED_REG ("r15"));
7849 }
7850
7851 if (inst.operands[i].imm == REG_PC)
7852 as_warn (UNPRED_REG ("r15"));
7853 }
7854
7855 if (inst.operands[i].shift_kind == SHIFT_RRX)
7856 inst.instruction |= SHIFT_ROR << 5;
7857 else
7858 {
7859 inst.instruction |= inst.operands[i].shift_kind << 5;
7860 if (inst.operands[i].immisreg)
7861 {
7862 inst.instruction |= SHIFT_BY_REG;
7863 inst.instruction |= inst.operands[i].imm << 8;
7864 }
7865 else
7866 inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
7867 }
7868 }
7869
7870 static void
7871 encode_arm_shifter_operand (int i)
7872 {
7873 if (inst.operands[i].isreg)
7874 {
7875 inst.instruction |= inst.operands[i].reg;
7876 encode_arm_shift (i);
7877 }
7878 else
7879 {
7880 inst.instruction |= INST_IMMEDIATE;
7881 if (inst.relocs[0].type != BFD_RELOC_ARM_IMMEDIATE)
7882 inst.instruction |= inst.operands[i].imm;
7883 }
7884 }
7885
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register and the pre/post-index and write-back
   bits that are common to both addressing modes, and diagnoses
   address forms the instruction cannot accept.  IS_T is TRUE when
   encoding a "T"-suffixed load/store (ldrt/strt family), which only
   permits post-indexed addressing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* For T-form instructions the W bit selects the user-mode
	 access; plain post-indexed stores leave it clear.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* When the base register will be written back, warn if it is the
     same register as Rd/Rt: compare the Rn field (bits 19:16) with
     the Rd/Rt field (bits 15:12) of the instruction built so far.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7928
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: [Rn, +/-Rm {, shift}].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    /* RRX is encoded as ROR with a zero shift amount.  */
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      /* The shift amount is filled in by md_apply_fix.  */
	      inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.relocs[0] */
    {
      if (is_pc && !inst.relocs[0].pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7988
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  Mode 3 covers halfword/signed-byte/doubleword
   transfers: plain register offset or an 8-bit split immediate.  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifter on the offset register.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form; PC restrictions as for mode 2.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.relocs[0] */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.relocs[0].pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      /* Select the immediate (as opposed to register-offset) form.  */
      inst.instruction |= HWOFFSET_IMM;
      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
8032
8033 /* Write immediate bits [7:0] to the following locations:
8034
8035 |28/24|23 19|18 16|15 4|3 0|
8036 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
8037
8038 This function is used by VMOV/VMVN/VORR/VBIC. */
8039
8040 static void
8041 neon_write_immbits (unsigned immbits)
8042 {
8043 inst.instruction |= immbits & 0xf;
8044 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
8045 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
8046 }
8047
/* Invert the low-order SIZE bits of the 64-bit value XHI:XLO in
   place.  Either pointer may be NULL, in which case that half is left
   untouched.  SIZE must be 8, 16, 32 or 64; anything else aborts.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo != NULL ? *xlo : 0;
  unsigned hi = xhi != NULL ? *xhi : 0;

  if (size == 8)
    lo = ~lo & 0xff;
  else if (size == 16)
    lo = ~lo & 0xffff;
  else if (size == 32)
    lo = ~lo & 0xffffffff;
  else if (size == 64)
    {
      /* Both halves participate for 64-bit inversion.  */
      lo = ~lo & 0xffffffff;
      hi = ~hi & 0xffffffff;
    }
  else
    abort ();

  if (xlo != NULL)
    *xlo = lo;

  if (xhi != NULL)
    *xhi = hi;
}
8084
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D — i.e. every byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (byte * 8)) & 0xff;

      if (field != 0 && field != 0xff)
	return 0;
    }

  return 1;
}
8096
/* For an immediate of the above form (each byte 0x00 or 0xff),
   collapse it to 0bABCD by taking bit 0 of each byte: byte N's low
   bit becomes result bit N.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned abcd = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    abcd |= ((imm >> (byte * 8)) & 0x1) << byte;

  return abcd;
}
8105
/* Compress a quarter-float representation to 0b...000 abcdefgh:
   the sign bit (bit 31) moves to bit 7 and bits [25:19] become
   bits [6:0].  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low7 = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return sign | low7;
}
8113
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  Returns FAIL if the immediate cannot be
   encoded at any element size down from SIZE.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-precision float immediate: only valid at 32-bit size and
     only for MOV (op == 0).  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* 64-bit pattern: every byte all-zeros or all-ones, squashed to
	 one bit per byte.  Note *op is forced to 1 here even for MOV.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise only encodable if both 32-bit halves are equal, in
	 which case fall through and treat it as a 32-bit element.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* Single non-zero byte within a 32-bit element (cmode 0/2/4/6),
	 or a byte followed by all-ones (cmode c/d).  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Try 16-bit elements if the two halfwords repeat.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* Single non-zero byte within a 16-bit element (cmode 8/a).  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Try 8-bit elements if the two bytes repeat.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
8223
8224 #if defined BFD_HOST_64_BIT
8225 /* Returns TRUE if double precision value V may be cast
8226 to single precision without loss of accuracy. */
8227
8228 static bfd_boolean
8229 is_double_a_single (bfd_int64_t v)
8230 {
8231 int exp = (int)((v >> 52) & 0x7FF);
8232 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
8233
8234 return (exp == 0 || exp == 0x7FF
8235 || (exp >= 1023 - 126 && exp <= 1023 + 127))
8236 && (mantissa & 0x1FFFFFFFl) == 0;
8237 }
8238
8239 /* Returns a double precision value casted to single precision
8240 (ignoring the least significant bits in exponent and mantissa). */
8241
8242 static int
8243 double_to_single (bfd_int64_t v)
8244 {
8245 int sign = (int) ((v >> 63) & 1l);
8246 int exp = (int) ((v >> 52) & 0x7FF);
8247 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
8248
8249 if (exp == 0x7FF)
8250 exp = 0xFF;
8251 else
8252 {
8253 exp = exp - 1023 + 127;
8254 if (exp >= 0xFF)
8255 {
8256 /* Infinity. */
8257 exp = 0x7F;
8258 mantissa = 0;
8259 }
8260 else if (exp < 0)
8261 {
8262 /* No denormalized numbers. */
8263 exp = 0;
8264 mantissa = 0;
8265 }
8266 }
8267 mantissa >>= 29;
8268 return (sign << 31) | (exp << 23) | mantissa;
8269 }
8270 #endif /* BFD_HOST_64_BIT */
8271
/* Kind of constant being loaded by an "=expr" pseudo-load; selects
   which move-instruction conversions move_or_literal_pool may try.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb load; may become MOV.W/MVN/MOVW.  */
  CONST_ARM,	/* ARM load; may become MOV/MVN.  */
  CONST_VEC	/* Vector load; may become VMOV/fconsts/fconstd.  */
};
8278
8279 static void do_vfp_nsyn_opcode (const char *);
8280
/* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.
   T selects the instruction class (see enum lit_type); MODE_3 chooses
   the halfword literal reloc for the pool fallback.  */

static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p = (t == CONST_ARM);

  /* Pick the load bit appropriate to the encoding width.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* "=expr" only makes sense on a load.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op != O_constant
      && inst.relocs[0].exp.X_op != O_symbol
      && inst.relocs[0].exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op == O_constant
      || inst.relocs[0].exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.relocs[0].exp.X_op == O_big)
	{
	  /* Reassemble the constant's value from its littlenum words.  */
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.relocs[0].exp.X_add_number == -1)
	    {
	      /* X_add_number == -1 marks a generic floating point
		 number; convert it to littlenums first.  */
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	      | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.relocs[0].exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* LDR must not be converted to a flag-setting (MOVS)
		 instruction, so we do not check whether movs can be
		 used.  */

	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		   || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		  && inst.operands[i].reg != 13
		  && inst.operands[i].reg != 15)
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      /* Try the complemented value for MVN.  */
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000  /*  MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      /* Try a Neon VMOV/VMVN immediate; if the value cannot be
		 encoded directly, try the inverted value with op
		 flipped.  */
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      unsigned immhi = inst.operands[1].regisimm
			       ? inst.operands[1].reg
			       : inst.relocs[0].exp.X_unsigned
				 ? 0
				 : ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
				     | (1 << 23)
				     | (cmode << 8)
				     | (op << 5)
				     | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move conversion possible: fall back to a literal-pool load.
     Vector double loads need an 8-byte pool slot.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  /* Rewrite the operand as a PC-relative pre-indexed reference to the
     pool entry; the reloc picks the pool-offset flavour.  */
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.relocs[0].pc_rel = 1;
  inst.relocs[0].type = (thumb_p
			 ? BFD_RELOC_ARM_THUMB_OFFSET
			 : (mode_3
			    ? BFD_RELOC_ARM_HWLITERAL
			    : BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8525
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  Returns SUCCESS or FAIL (with
   inst.error set).  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256: a bare "=constant" is only accepted for a vector
	 destination, where it may become a vmov or a pool load.  */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form carries the option value in the immediate field.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.relocs[0].type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.relocs[0].type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.relocs[0].type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.relocs[0].type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      /* Not a group relocation: pick the default coprocessor-offset
	 reloc for the current instruction set.  */
      if (thumb_mode)
	inst.relocs[0].type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.relocs[0].type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8602
8603 /* Functions for instruction encoding, sorted by sub-architecture.
8604 First some generics; their names are taken from the conventional
8605 bit positions for register arguments in ARM format instructions. */
8606
/* Encoder for instructions with no operands: the opcode-table value
   is already complete, so there is nothing to add.  */
static void
do_noargs (void)
{
}
8611
/* Encode a single register operand into the Rd field (bits 15:12).  */
static void
do_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
}
8617
/* Encode a single register operand into the Rn field (bits 19:16).  */
static void
do_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
}
8623
8624 static void
8625 do_rd_rm (void)
8626 {
8627 inst.instruction |= inst.operands[0].reg << 12;
8628 inst.instruction |= inst.operands[1].reg;
8629 }
8630
8631 static void
8632 do_rm_rn (void)
8633 {
8634 inst.instruction |= inst.operands[0].reg;
8635 inst.instruction |= inst.operands[1].reg << 16;
8636 }
8637
8638 static void
8639 do_rd_rn (void)
8640 {
8641 inst.instruction |= inst.operands[0].reg << 12;
8642 inst.instruction |= inst.operands[1].reg << 16;
8643 }
8644
8645 static void
8646 do_rn_rd (void)
8647 {
8648 inst.instruction |= inst.operands[0].reg << 16;
8649 inst.instruction |= inst.operands[1].reg << 12;
8650 }
8651
8652 static void
8653 do_tt (void)
8654 {
8655 inst.instruction |= inst.operands[0].reg << 8;
8656 inst.instruction |= inst.operands[1].reg << 16;
8657 }
8658
/* Diagnose use of an obsolete construct.  If the CPU is the catch-all
   "any" CPU, only warn (the real target may predate FEATURE); if the
   selected CPU definitely has FEATURE, issue a hard error.  MSG is the
   diagnostic text.  Returns TRUE iff a diagnostic was emitted.  */
static bfd_boolean
check_obsolete (const arm_feature_set *feature, const char *msg)
{
  if (ARM_CPU_IS_ANY (cpu_variant))
    {
      as_tsktsk ("%s", msg);
      return TRUE;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
    {
      as_bad ("%s", msg);
      return TRUE;
    }

  return FALSE;
}
8675
/* Encode three register operands into Rd (15:12), Rm (3:0) and
   Rn (19:16), with extra validity checks when the opcode is SWP{B}.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8699
8700 static void
8701 do_rd_rn_rm (void)
8702 {
8703 inst.instruction |= inst.operands[0].reg << 12;
8704 inst.instruction |= inst.operands[1].reg << 16;
8705 inst.instruction |= inst.operands[2].reg;
8706 }
8707
/* Encode three register operands into Rm (3:0), Rd (15:12) and
   Rn (19:16).  The third operand is an address: PC is rejected and
   any offset expression must be absent or zero.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  constraint (((inst.relocs[0].exp.X_op != O_constant
		&& inst.relocs[0].exp.X_op != O_illegal)
	       || inst.relocs[0].exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8720
/* Encode a single immediate operand into the low bits of the opcode.  */
static void
do_imm0 (void)
{
  inst.instruction |= inst.operands[0].imm;
}
8726
/* Encode operand 0 into Rd (bits 15:12) and operand 1 as a
   coprocessor address (writeback and unindexed forms allowed).  */
static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
8733
8734 /* ARM instructions, in alphabetical order by function name (except
8735 that wrapper functions appear immediately after the function they
8736 wrap). */
8737
/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */

static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* Account for the ARM-state PC offset of 8 bytes.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* For interworking, taking the address of a defined Thumb function
     sets the low bit of the result so a later BX enters Thumb state.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
8759
/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
	add rd, pc, #low(label-.-8)"
	add rd, rd, #high(label-.-8)"  */

static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* ADRL expands to two instructions.  */
  inst.size = INSN_SIZE * 2;
  /* Account for the ARM-state PC offset of 8 bytes.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* For interworking, taking the address of a defined Thumb function
     sets the low bit of the result so a later BX enters Thumb state.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
8784
/* Encode a data-processing (arithmetic) instruction: Rd (15:12),
   Rn (19:16) and a shifter operand.  When only two operands are
   given, Rd doubles as Rn ("add r0, r1" == "add r0, r0, r1").  */
static void
do_arit (void)
{
  /* Thumb-only group relocations are not valid on this ARM form.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
8797
8798 static void
8799 do_barrier (void)
8800 {
8801 if (inst.operands[0].present)
8802 inst.instruction |= inst.operands[0].imm;
8803 else
8804 inst.instruction |= 0xf;
8805 }
8806
/* Encode BFC: Rd (15:12), LSB (11:7) and MSB (20:16).  The assembly
   syntax gives LSB and width; the encoding wants LSB and MSB.  */
static void
do_bfc (void)
{
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
8818
/* Encode BFI: Rd (15:12), Rm (3:0), LSB (11:7) and MSB (20:16).  */
static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
8838
/* Encode SBFX/UBFX: Rd (15:12), Rn (3:0), LSB (11:7) and
   width-minus-one (20:16).  */
static void
do_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
}
8849
8850 /* ARM V5 breakpoint instruction (argument parse)
8851 BKPT <16 bit unsigned immediate>
8852 Instruction is not conditional.
8853 The bit pattern given in insns[] has the COND_ALWAYS condition,
8854 and it is an error if the caller tried to override that. */
8855
8856 static void
8857 do_bkpt (void)
8858 {
8859 /* Top 12 of 16 bits to bits 19:8. */
8860 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8861
8862 /* Bottom 4 of 16 bits to bits 3:0. */
8863 inst.instruction |= inst.operands[0].imm & 0xf;
8864 }
8865
/* Set up the PC-relative relocation for a branch.  An explicit
   "(plt)" or "(tlscall)" suffix on the operand overrides
   DEFAULT_RELOC; any other suffix is rejected.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.relocs[0].type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.relocs[0].type = (bfd_reloc_code_real_type) default_reloc;
  inst.relocs[0].pc_rel = 1;
}
8882
/* Encode B{cond}: choose the EABI v4+ jump relocation when
   available, otherwise the plain branch relocation.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8893
/* Encode BL{cond}: on EABI v4+ an unconditional BL gets the call
   relocation, a conditional one the jump relocation; otherwise fall
   back to the plain branch relocation.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8909
/* ARM V5 branch-link-exchange instruction (argument parse)
     BLX <target_addr>		ie BLX(1)
     BLX{<condition>} <Rm>	ie BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */

static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8941
/* Encode BX: put the register in bits 3:0 and decide whether an
   R_ARM_V4BX relocation is needed (EABI v4+ objects targetting
   pre-v5 architectures).  */
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (!ARM_FEATURE_ZERO (selected_object_arch)
      && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
    want_reloc = TRUE;

#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.relocs[0].type = BFD_RELOC_ARM_V4BX;
}
8966
8967
/* ARM v5TEJ.  Jump to Jazelle code.  The register goes in bits 3:0.  */

static void
do_bxj (void)
{
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg;
}
8978
/* Co-processor data operation:
      CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
      CDP2	<coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
   Fields: coproc (11:8), opcode_1 (23:20), CRd (15:12), CRn (19:16),
   CRm (3:0), opcode_2 (7:5).  */
static void
do_cdp (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 20;
  inst.instruction |= inst.operands[2].reg << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8992
/* Encode a comparison instruction: Rn (19:16) plus a shifter
   operand; there is no destination register.  */
static void
do_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);
}
8999
9000 /* Transfer between coprocessor and ARM registers.
9001 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
9002 MRC2
9003 MCR{cond}
9004 MCR2
9005
9006 No special properties. */
9007
/* Description of one coprocessor register access whose use is
   deprecated and/or obsolete on some architectures.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opcode_1 field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opcode_2 field.  */
  arm_feature_set deprecated;	/* Architectures where use is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where use is obsolete.  */
  const char *dep_msg;		/* Deprecation diagnostic text.  */
  const char *obs_msg;		/* Obsolescence diagnostic text.  */
};
9020
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  Each entry names the
   cp/opc1/CRn/CRm/opc2 tuple and the architectures it is deprecated
   (and, where applicable, obsoleted) for.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in the table above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
9048
/* Encode MRC/MRC2/MCR/MCR2: coproc (11:8), opcode_1 (23:21),
   Rd (15:12), CRn (19:16), CRm (3:0), opcode_2 (7:5).  Also checks
   register validity per mode and warns about deprecated coprocessor
   register accesses.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn if this access matches a deprecated coprocessor register.  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
9098
9099 /* Transfer between coprocessor register and pair of ARM registers.
9100 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
9101 MCRR2
9102 MRRC{cond}
9103 MRRC2
9104
9105 Two XScale instructions are special cases of these:
9106
9107 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
9108 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
9109
9110 Result unpredictable if Rd or Rn is R15. */
9111
/* Encode MCRR/MCRR2/MRRC/MRRC2 (see comment above for syntax).
   Validates the two core registers, rejects Rd == Rn for the MRRC
   forms, then packs the fields.  */
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
      /* If Rd == Rn, error that the operation is
	 unpredictable (example MRRC p3,#1,r1,r1,c4).  */
      constraint (Rd == Rn, BAD_OVERLAP);
    }

  /* Field layout: coproc<<8, opcode<<4, Rd<<12, Rn<<16, CRm.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
9145
9146 static void
9147 do_cpsi (void)
9148 {
9149 inst.instruction |= inst.operands[0].imm << 6;
9150 if (inst.operands[1].present)
9151 {
9152 inst.instruction |= CPSI_MMOD;
9153 inst.instruction |= inst.operands[1].imm;
9154 }
9155 }
9156
9157 static void
9158 do_dbg (void)
9159 {
9160 inst.instruction |= inst.operands[0].imm;
9161 }
9162
9163 static void
9164 do_div (void)
9165 {
9166 unsigned Rd, Rn, Rm;
9167
9168 Rd = inst.operands[0].reg;
9169 Rn = (inst.operands[1].present
9170 ? inst.operands[1].reg : Rd);
9171 Rm = inst.operands[2].reg;
9172
9173 constraint ((Rd == REG_PC), BAD_PC);
9174 constraint ((Rn == REG_PC), BAD_PC);
9175 constraint ((Rm == REG_PC), BAD_PC);
9176
9177 inst.instruction |= Rd << 16;
9178 inst.instruction |= Rn << 0;
9179 inst.instruction |= Rm << 8;
9180 }
9181
/* Handle IT in ARM mode.  The instruction emits no bytes; under
   unified syntax we still record the IT block state so following
   instructions are validated as they would be for Thumb.  */
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  inst.size = 0;
  if (unified_syntax)
    {
      set_pred_insn_type (IT_INSN);
      /* Low nibble of the opcode holds the condition mask; 0x10 marks
	 the block as active.  */
      now_pred.mask = (inst.instruction & 0xf) | 0x10;
      now_pred.cc = inst.operands[0].imm;
    }
}
9198
9199 /* If there is only one register in the register list,
9200 then return its register number. Otherwise return -1. */
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i;

  /* An empty list has no single register.  Bail out before computing
     the shift below: with range == 0, ffs returns 0, i becomes -1 and
     (1 << i) would be a shift by a negative count, which is undefined
     behaviour in C.  */
  if (range == 0)
    return -1;

  i = ffs (range) - 1;
  return (i > 15 || range != (1 << i)) ? -1 : i;
}
9207
/* Encode an ARM LDM/STM (also used by PUSH/POP, which pass
   FROM_PUSH_POP_MNEM).  Emits UNPREDICTABLE warnings for the various
   writeback/register-list overlap cases, and rewrites single-register
   PUSH/POP into the A2 (single transfer) encoding.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;	/* Register list bitmask.  */
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A '^' suffix on the register list selects the user-bank/exception
     return forms (LDM type 2/3).  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      if (is_push && one_reg == 13 /* SP */)
	/* PR 22483: The A2 encoding cannot be used when
	   pushing the stack pointer as this is UNPREDICTABLE.  */
	return;

      /* Keep only the condition field and rebuild as single transfer.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
9263
/* LDM/STM proper: encode without the PUSH/POP single-register
   rewriting.  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
9269
9270 /* ARMv5TE load-consecutive (argument parse)
9271 Mode is like LDRH.
9272
9273 LDRccD R, mode
9274 STRccD R, mode. */
9275
/* Encode LDRD/STRD: transfers an even/odd register pair; the second
   register defaults to Rt + 1.  Diagnoses register-pair and
   base/index overlap hazards before handing the address to
   encode_arm_addr_mode_3.  */
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* r14 is excluded because the pair would include r15 (PC).  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
9311
/* Encode ARM LDREX: Rt, [Rn].  Only a plain pre-indexed base with a
   zero offset is a legal addressing mode.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The zero offset has no relocation to emit.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
9343
/* Encode LDREXD: loads an even/odd register pair; the second register
   defaults to Rt + 1 and is implicit in the encoding.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9359
9360 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9361 which is not a multiple of four is UNPREDICTABLE. */
/* Reject 'ldr pc, [pc, #imm]' when the literal offset is not a
   multiple of four (see comment above).  Register-offset forms are
   exempt since their offset is unknown at assembly time.  */
static void
check_ldr_r15_aligned (void)
{
  constraint (!(inst.operands[1].immisreg)
	      && (inst.operands[0].reg == REG_PC
	      && inst.operands[1].reg == REG_PC
	      && (inst.relocs[0].exp.X_add_number & 0x3)),
	      _("ldr to register 15 must be 4-byte aligned"));
}
9371
/* Encode LDR/STR word/byte.  A non-register second operand is a
   literal, which may be turned into a MOV or a literal-pool load.  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    /* Returns TRUE when it fully handled (or errored on) the insn.  */
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
9382
/* Encode LDRT/STRT (user-mode transfer).  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
9401
9402 /* Halfword and signed-byte load/store operations. */
9403
/* Encode the ARMv4 halfword/signed-byte loads and stores
   (addressing mode 3).  */
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    /* Literal operand: may become a MOV or a literal-pool load.  */
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
9414
/* Encode the user-mode halfword/signed-byte transfers
   (LDRHT/STRHT etc., addressing mode 3).  */
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9433
9434 /* Co-processor register load/store.
9435 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9436 static void
9437 do_lstc (void)
9438 {
9439 inst.instruction |= inst.operands[0].reg << 8;
9440 inst.instruction |= inst.operands[1].reg << 12;
9441 encode_arm_cp_address (2, TRUE, TRUE, 0);
9442 }
9443
9444 static void
9445 do_mlas (void)
9446 {
9447 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9448 if (inst.operands[0].reg == inst.operands[1].reg
9449 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
9450 && !(inst.instruction & 0x00400000))
9451 as_tsktsk (_("Rd and Rm should be different in mla"));
9452
9453 inst.instruction |= inst.operands[0].reg << 16;
9454 inst.instruction |= inst.operands[1].reg;
9455 inst.instruction |= inst.operands[2].reg << 8;
9456 inst.instruction |= inst.operands[3].reg << 12;
9457 }
9458
/* Encode ARM MOV with a shifter operand.  The Thumb-1 ADD/SUB group
   relocations are not representable in the ARM encoding.  */
static void
do_mov (void)
{
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9468
9469 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.  */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 22 distinguishes MOVT from MOVW.  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.relocs[0].type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.relocs[0].type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  /* BFD_RELOC_UNUSED means the value is known now; otherwise a
     relocation will fill in the immediate later.  */
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      imm = inst.relocs[0].exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
9490
/* Handle the VFP forms of MRS under unified syntax by re-dispatching
   to the old-syntax opcodes: 'mrs APSR_nzcv, FPSCR' becomes FMSTAT,
   and a VFP system-register source becomes FMRX.  Returns SUCCESS if
   the instruction was handled here, FAIL otherwise.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* FMSTAT takes no operands, so clear them before dispatch.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
9509
/* Handle the VFP form of MSR (system-register destination) by
   re-dispatching to FMXR.  Returns SUCCESS if handled, FAIL
   otherwise.  */
static int
do_vfp_nsyn_msr (void)
{
  if (inst.operands[0].isvec)
    do_vfp_nsyn_opcode ("fmxr");
  else
    return FAIL;

  return SUCCESS;
}
9520
/* Encode VMRS: Rt, <vfp system reg>.  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* MVFR2 is only valid at ARMv8-A.  */
  if (inst.operands[1].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
9549
/* Encode VMSR: <vfp system reg>, Rt.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* MVFR2 is only valid for ARMv8-A.  */
  if (inst.operands[0].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
9573
/* Encode MRS, first giving the VFP pseudo-forms a chance to claim the
   instruction.  The source is either a banked register (parsed as a
   register operand) or a PSR mask immediate.  */
static void
do_mrs (void)
{
  unsigned br;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      br = inst.operands[1].reg;
      /* Banked-register encodings carry bit 9 or an all-ones 16-19
	 field; anything else did not come from the banked-reg parser.  */
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9602
9603 /* Two possible forms:
9604 "{C|S}PSR_<field>, Rm",
9605 "{C|S}PSR_f, #expression". */
9606
/* Encode MSR (see the two forms in the comment above), first giving
   the VFP pseudo-form a chance to claim the instruction.  */
static void
do_msr (void)
{
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: leave a relocation so the immediate gets
	 encoded (and range-checked) later.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
      inst.relocs[0].pc_rel = 0;
    }
}
9623
9624 static void
9625 do_mul (void)
9626 {
9627 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
9628
9629 if (!inst.operands[2].present)
9630 inst.operands[2].reg = inst.operands[0].reg;
9631 inst.instruction |= inst.operands[0].reg << 16;
9632 inst.instruction |= inst.operands[1].reg;
9633 inst.instruction |= inst.operands[2].reg << 8;
9634
9635 if (inst.operands[0].reg == inst.operands[1].reg
9636 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
9637 as_tsktsk (_("Rd and Rm should be different in mul"));
9638 }
9639
9640 /* Long Multiply Parser
9641 UMULL RdLo, RdHi, Rm, Rs
9642 SMULL RdLo, RdHi, Rm, Rs
9643 UMLAL RdLo, RdHi, Rm, Rs
9644 SMLAL RdLo, RdHi, Rm, Rs. */
9645
9646 static void
9647 do_mull (void)
9648 {
9649 inst.instruction |= inst.operands[0].reg << 12;
9650 inst.instruction |= inst.operands[1].reg << 16;
9651 inst.instruction |= inst.operands[2].reg;
9652 inst.instruction |= inst.operands[3].reg << 8;
9653
9654 /* rdhi and rdlo must be different. */
9655 if (inst.operands[0].reg == inst.operands[1].reg)
9656 as_tsktsk (_("rdhi and rdlo must be different"));
9657
9658 /* rdhi, rdlo and rm must all be different before armv6. */
9659 if ((inst.operands[0].reg == inst.operands[2].reg
9660 || inst.operands[1].reg == inst.operands[2].reg)
9661 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
9662 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9663 }
9664
/* Encode NOP{<n>}.  On v6K and later (or when a hint operand is
   given), use the architectural hint encoding; otherwise leave the
   traditional 'mov r0, r0' style opcode untouched.  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* Preserve only the condition.  */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
9678
9679 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9680 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9681 Condition defaults to COND_ALWAYS.
9682 Error if Rd, Rn or Rm are R15. */
9683
9684 static void
9685 do_pkhbt (void)
9686 {
9687 inst.instruction |= inst.operands[0].reg << 12;
9688 inst.instruction |= inst.operands[1].reg << 16;
9689 inst.instruction |= inst.operands[2].reg;
9690 if (inst.operands[3].present)
9691 encode_arm_shift (3);
9692 }
9693
9694 /* ARM V6 PKHTB (Argument Parse). */
9695
/* Encode PKHTB.  Without a shift it is equivalent to PKHBT with the
   source registers exchanged, which is how it is encoded.  */
static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  */
      inst.instruction &= 0xfff00010;	/* Clear the register and shift fields.  */
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
9716
9717 /* ARMv5TE: Preload-Cache
9718 MP Extensions: Preload for write
9719
9720 PLD(W) <addr_mode>
9721
9722 Syntactically, like LDR with B=1, W=0, L=1. */
9723
/* Encode PLD/PLDW (see comment above).  Only plain pre-indexed
   addressing is legal; the constraints are checked in order, so the
   first failing one produces the diagnostic.  */
static void
do_pld (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9737
9738 /* ARMv7: PLI <addr_mode> */
/* ARMv7: PLI <addr_mode>.  Same addressing restrictions as PLD.  */
static void
do_pli (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI is encoded with the P bit clear.  */
  inst.instruction &= ~PRE_INDEX;
}
9753
/* Encode PUSH/POP by rewriting them as LDM/STM with a writeback SP
   base: the parsed register list becomes operand 1 and a synthetic
   'sp!' becomes operand 0.  */
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9766
9767 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9768 word at the specified address and the following word
9769 respectively.
9770 Unconditionally executed.
9771 Error if Rn is R15. */
9772
9773 static void
9774 do_rfe (void)
9775 {
9776 inst.instruction |= inst.operands[0].reg << 16;
9777 if (inst.operands[0].writeback)
9778 inst.instruction |= WRITE_BACK;
9779 }
9780
9781 /* ARM V6 ssat (argument parse). */
9782
9783 static void
9784 do_ssat (void)
9785 {
9786 inst.instruction |= inst.operands[0].reg << 12;
9787 inst.instruction |= (inst.operands[1].imm - 1) << 16;
9788 inst.instruction |= inst.operands[2].reg;
9789
9790 if (inst.operands[3].present)
9791 encode_arm_shift (3);
9792 }
9793
9794 /* ARM V6 usat (argument parse). */
9795
9796 static void
9797 do_usat (void)
9798 {
9799 inst.instruction |= inst.operands[0].reg << 12;
9800 inst.instruction |= inst.operands[1].imm << 16;
9801 inst.instruction |= inst.operands[2].reg;
9802
9803 if (inst.operands[3].present)
9804 encode_arm_shift (3);
9805 }
9806
9807 /* ARM V6 ssat16 (argument parse). */
9808
9809 static void
9810 do_ssat16 (void)
9811 {
9812 inst.instruction |= inst.operands[0].reg << 12;
9813 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9814 inst.instruction |= inst.operands[2].reg;
9815 }
9816
9817 static void
9818 do_usat16 (void)
9819 {
9820 inst.instruction |= inst.operands[0].reg << 12;
9821 inst.instruction |= inst.operands[1].imm << 16;
9822 inst.instruction |= inst.operands[2].reg;
9823 }
9824
9825 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9826 preserving the other bits.
9827
9828 setend <endian_specifier>, where <endian_specifier> is either
9829 BE or LE. */
9830
/* Encode SETEND (see comment above): bit 9 selects big-endian.  */
static void
do_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* A non-zero operand means BE was requested.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9841
/* Encode the shift pseudo-instructions (Rd, {Rm,} shift-by-register
   or shift-by-immediate).  When Rm is omitted it defaults to Rd.  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate amount: encoded later via a shift-immediate reloc.  */
    inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
}
9862
/* Encode SMC: the immediate is emitted via an SMC relocation.  */
static void
do_smc (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SMC;
  inst.relocs[0].pc_rel = 0;
}
9869
/* Encode HVC: the immediate is emitted via an HVC relocation.  */
static void
do_hvc (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_HVC;
  inst.relocs[0].pc_rel = 0;
}
9876
/* Encode SWI/SVC: the immediate is emitted via a SWI relocation.  */
static void
do_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
  inst.relocs[0].pc_rel = 0;
}
9883
/* Encode ARM-state SETPAN: the 1-bit operand goes in bit 9.  */
static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}
9892
/* Encode Thumb-state SETPAN: the operand goes in bit 3.  */
static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
9901
9902 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9903 SMLAxy{cond} Rd,Rm,Rs,Rn
9904 SMLAWy{cond} Rd,Rm,Rs,Rn
9905 Error if any register is R15. */
9906
9907 static void
9908 do_smla (void)
9909 {
9910 inst.instruction |= inst.operands[0].reg << 16;
9911 inst.instruction |= inst.operands[1].reg;
9912 inst.instruction |= inst.operands[2].reg << 8;
9913 inst.instruction |= inst.operands[3].reg << 12;
9914 }
9915
9916 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9917 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9918 Error if any register is R15.
9919 Warning if Rdlo == Rdhi. */
9920
9921 static void
9922 do_smlal (void)
9923 {
9924 inst.instruction |= inst.operands[0].reg << 12;
9925 inst.instruction |= inst.operands[1].reg << 16;
9926 inst.instruction |= inst.operands[2].reg;
9927 inst.instruction |= inst.operands[3].reg << 8;
9928
9929 if (inst.operands[0].reg == inst.operands[1].reg)
9930 as_tsktsk (_("rdhi and rdlo must be different"));
9931 }
9932
9933 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9934 SMULxy{cond} Rd,Rm,Rs
9935 Error if any register is R15. */
9936
9937 static void
9938 do_smul (void)
9939 {
9940 inst.instruction |= inst.operands[0].reg << 16;
9941 inst.instruction |= inst.operands[1].reg;
9942 inst.instruction |= inst.operands[2].reg << 8;
9943 }
9944
9945 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9946 the same for both ARM and Thumb-2. */
9947
/* Encode SRS (see comment above).  The base register, if given, must
   be r13 and defaults to it.  */
static void
do_srs (void)
{
  int reg;

  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  /* Operand 1 is the mode number.  */
  inst.instruction |= inst.operands[1].imm;
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
9966
9967 /* ARM V6 strex (argument parse). */
9968
/* Encode ARM STREX: Rd, Rt, [Rn].  Only a plain base with zero
   offset is legal, and Rd must not overlap Rt or Rn.  */
static void
do_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The zero offset has no relocation to emit.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
9992
/* Encode Thumb STREXB/STREXH: same addressing and overlap rules as
   STREX; the registers are packed by the generic Rm/Rd/Rn encoder.  */
static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
10007
/* Encode STREXD: stores an even/odd register pair; the second source
   register defaults to Rt + 1 and is implicit in the encoding.  Rd
   must not overlap either source register or the base.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
10029
10030 /* ARM V8 STRL. */
/* Encode ARM-state STLEX: the status register must not overlap the
   source or base; field packing is shared with do_rd_rm_rn.  */
static void
do_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
10039
/* Encode Thumb-state STLEX: as do_stlex, but Thumb field packing.  */
static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
10048
10049 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
10050 extends it to 32-bits, and adds the result to a value in another
10051 register. You can specify a rotation by 0, 8, 16, or 24 bits
10052 before extracting the 16-bit value.
10053 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
10054 Condition defaults to COND_ALWAYS.
10055 Error if any register uses R15. */
10056
10057 static void
10058 do_sxtah (void)
10059 {
10060 inst.instruction |= inst.operands[0].reg << 12;
10061 inst.instruction |= inst.operands[1].reg << 16;
10062 inst.instruction |= inst.operands[2].reg;
10063 inst.instruction |= inst.operands[3].imm << 10;
10064 }
10065
10066 /* ARM V6 SXTH.
10067
10068 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
10069 Condition defaults to COND_ALWAYS.
10070 Error if any register uses R15. */
10071
10072 static void
10073 do_sxth (void)
10074 {
10075 inst.instruction |= inst.operands[0].reg << 12;
10076 inst.instruction |= inst.operands[1].reg;
10077 inst.instruction |= inst.operands[2].imm << 10;
10078 }
10079 \f
10080 /* VFP instructions. In a logical order: SP variant first, monad
10081 before dyad, arithmetic then move then load/store. */
10082
10083 static void
10084 do_vfp_sp_monadic (void)
10085 {
10086 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
10087 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
10088 }
10089
10090 static void
10091 do_vfp_sp_dyadic (void)
10092 {
10093 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
10094 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
10095 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
10096 }
10097
/* Single-precision compare against zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
10103
10104 static void
10105 do_vfp_dp_sp_cvt (void)
10106 {
10107 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
10108 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
10109 }
10110
10111 static void
10112 do_vfp_sp_dp_cvt (void)
10113 {
10114 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
10115 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
10116 }
10117
10118 static void
10119 do_vfp_reg_from_sp (void)
10120 {
10121 inst.instruction |= inst.operands[0].reg << 12;
10122 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
10123 }
10124
10125 static void
10126 do_vfp_reg2_from_sp2 (void)
10127 {
10128 constraint (inst.operands[2].imm != 2,
10129 _("only two consecutive VFP SP registers allowed here"));
10130 inst.instruction |= inst.operands[0].reg << 12;
10131 inst.instruction |= inst.operands[1].reg << 16;
10132 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
10133 }
10134
10135 static void
10136 do_vfp_sp_from_reg (void)
10137 {
10138 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
10139 inst.instruction |= inst.operands[1].reg << 12;
10140 }
10141
10142 static void
10143 do_vfp_sp2_from_reg2 (void)
10144 {
10145 constraint (inst.operands[0].imm != 2,
10146 _("only two consecutive VFP SP registers allowed here"));
10147 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
10148 inst.instruction |= inst.operands[1].reg << 12;
10149 inst.instruction |= inst.operands[2].reg << 16;
10150 }
10151
static void
do_vfp_sp_ldst (void)
{
  /* Single-precision load/store: encode Sd and the memory operand
     (operand 1) as a coprocessor-style address.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
10158
static void
do_vfp_dp_ldst (void)
{
  /* Double-precision load/store: encode Dd and the memory operand
     (operand 1) as a coprocessor-style address.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
10165
10166
/* Common worker for single-precision load/store multiple.  LDSTM_TYPE
   selects the addressing variant; only VFP_LDSTMIA is legal without
   base-register writeback.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;	/* Base register.  */
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);	/* First Sreg.  */
  inst.instruction |= inst.operands[1].imm;	/* Transfer length.  */
}
10179
10180 static void
10181 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
10182 {
10183 int count;
10184
10185 if (inst.operands[0].writeback)
10186 inst.instruction |= WRITE_BACK;
10187 else
10188 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
10189 _("this addressing mode requires base-register writeback"));
10190
10191 inst.instruction |= inst.operands[0].reg << 16;
10192 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
10193
10194 count = inst.operands[1].imm << 1;
10195 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
10196 count += 1;
10197
10198 inst.instruction |= count;
10199 }
10200
/* Increment-after form, single precision.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}
10206
/* Decrement-before form, single precision.  */
static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}
10212
/* Increment-after form, double precision.  */
static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}
10218
/* Decrement-before form, double precision.  */
static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}
10224
/* Increment-after, extended (X) form: odd transfer count.  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
10230
/* Decrement-before, extended (X) form: odd transfer count.  */
static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
10236
10237 static void
10238 do_vfp_dp_rd_rm (void)
10239 {
10240 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
10241 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
10242 }
10243
10244 static void
10245 do_vfp_dp_rn_rd (void)
10246 {
10247 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
10248 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
10249 }
10250
10251 static void
10252 do_vfp_dp_rd_rn (void)
10253 {
10254 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
10255 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
10256 }
10257
10258 static void
10259 do_vfp_dp_rd_rn_rm (void)
10260 {
10261 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
10262 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
10263 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
10264 }
10265
static void
do_vfp_dp_rd (void)
{
  /* Only the destination Dd is encoded.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
10271
10272 static void
10273 do_vfp_dp_rm_rd_rn (void)
10274 {
10275 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
10276 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
10277 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
10278 }
10279
10280 /* VFPv3 instructions. */
10281 static void
10282 do_vfp_sp_const (void)
10283 {
10284 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
10285 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
10286 inst.instruction |= (inst.operands[1].imm & 0x0f);
10287 }
10288
10289 static void
10290 do_vfp_dp_const (void)
10291 {
10292 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
10293 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
10294 inst.instruction |= (inst.operands[1].imm & 0x0f);
10295 }
10296
/* Encode the fraction-bits immediate for VFP fixed-point conversions.
   SRCSIZE is the width of the fixed-point operand (16 or 32).  The
   encoded value is SRCSIZE minus the user's immediate; its low bit
   goes into insn bit 5 and the remaining bits into bits 0-3.  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
10320
static void
do_vfp_sp_conv_16 (void)
{
  /* Single-precision fixed-point conversion, 16-bit operand width.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
10327
static void
do_vfp_dp_conv_16 (void)
{
  /* Double-precision fixed-point conversion, 16-bit operand width.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
10334
static void
do_vfp_sp_conv_32 (void)
{
  /* Single-precision fixed-point conversion, 32-bit operand width.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
10341
static void
do_vfp_dp_conv_32 (void)
{
  /* Double-precision fixed-point conversion, 32-bit operand width.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
10348 \f
10349 /* FPA instructions. Also in a logical order. */
10350
10351 static void
10352 do_fpa_cmp (void)
10353 {
10354 inst.instruction |= inst.operands[0].reg << 16;
10355 inst.instruction |= inst.operands[1].reg;
10356 }
10357
/* FPA multiple load/store.  Operand 0 is the first register, operand 1
   the register count (1-4) and operand 2 the memory operand.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;	/* Four registers: both count bits clear.  */
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Synthesize an offset of 12 bytes per transferred register.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.relocs[0].exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.relocs[0].exp.X_add_number = -inst.relocs[0].exp.X_add_number;

      /* Post-increment with writeback is modelled by switching the
	 operand to post-indexed form.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
10396 \f
10397 /* iWMMXt instructions: strictly in alphabetical order. */
10398
static void
do_iwmmxt_tandorc (void)
{
  /* The only legal register operand for these insns is r15.  */
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
10404
10405 static void
10406 do_iwmmxt_textrc (void)
10407 {
10408 inst.instruction |= inst.operands[0].reg << 12;
10409 inst.instruction |= inst.operands[1].imm;
10410 }
10411
10412 static void
10413 do_iwmmxt_textrm (void)
10414 {
10415 inst.instruction |= inst.operands[0].reg << 12;
10416 inst.instruction |= inst.operands[1].reg << 16;
10417 inst.instruction |= inst.operands[2].imm;
10418 }
10419
10420 static void
10421 do_iwmmxt_tinsr (void)
10422 {
10423 inst.instruction |= inst.operands[0].reg << 16;
10424 inst.instruction |= inst.operands[1].reg << 12;
10425 inst.instruction |= inst.operands[2].imm;
10426 }
10427
10428 static void
10429 do_iwmmxt_tmia (void)
10430 {
10431 inst.instruction |= inst.operands[0].reg << 5;
10432 inst.instruction |= inst.operands[1].reg;
10433 inst.instruction |= inst.operands[2].reg << 12;
10434 }
10435
10436 static void
10437 do_iwmmxt_waligni (void)
10438 {
10439 inst.instruction |= inst.operands[0].reg << 12;
10440 inst.instruction |= inst.operands[1].reg << 16;
10441 inst.instruction |= inst.operands[2].reg;
10442 inst.instruction |= inst.operands[3].imm << 20;
10443 }
10444
10445 static void
10446 do_iwmmxt_wmerge (void)
10447 {
10448 inst.instruction |= inst.operands[0].reg << 12;
10449 inst.instruction |= inst.operands[1].reg << 16;
10450 inst.instruction |= inst.operands[2].reg;
10451 inst.instruction |= inst.operands[3].imm << 21;
10452 }
10453
10454 static void
10455 do_iwmmxt_wmov (void)
10456 {
10457 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10458 inst.instruction |= inst.operands[0].reg << 12;
10459 inst.instruction |= inst.operands[1].reg << 16;
10460 inst.instruction |= inst.operands[1].reg;
10461 }
10462
10463 static void
10464 do_iwmmxt_wldstbh (void)
10465 {
10466 int reloc;
10467 inst.instruction |= inst.operands[0].reg << 12;
10468 if (thumb_mode)
10469 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
10470 else
10471 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
10472 encode_arm_cp_address (1, TRUE, FALSE, reloc);
10473 }
10474
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  Control-register
     forms must be unconditional and use the 0xf condition field.  */
  if (!inst.operands[0].isreg)
    {
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
10488
/* iWMMXt doubleword load/store.  On iWMMXt2 a register-offset address
   is also accepted, using a different (unconditional) encoding that is
   assembled by hand below.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Register-offset form: clear the coprocessor addressing bits
	 and rebuild the fields one by one.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);	/* Unconditional.  */
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;	/* Base register.  */
      inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;	/* Index register.  */
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
10511
10512 static void
10513 do_iwmmxt_wshufh (void)
10514 {
10515 inst.instruction |= inst.operands[0].reg << 12;
10516 inst.instruction |= inst.operands[1].reg << 16;
10517 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
10518 inst.instruction |= (inst.operands[2].imm & 0x0f);
10519 }
10520
10521 static void
10522 do_iwmmxt_wzero (void)
10523 {
10524 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10525 inst.instruction |= inst.operands[0].reg;
10526 inst.instruction |= inst.operands[0].reg << 12;
10527 inst.instruction |= inst.operands[0].reg << 16;
10528 }
10529
/* Encode an iWMMXt shift/rotate whose third operand is either a
   register (base iWMMXt) or an immediate (iWMMXt2 only).  A zero
   immediate is first rewritten into an equivalent instruction, since
   immediates are encoded modulo 32 and a raw zero would mean 32.  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Dispatch on the size/operation nibble in bits 20-23.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
10579 \f
10580 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10581 operations first, then control, shift, and load/store. */
10582
10583 /* Insns like "foo X,Y,Z". */
10584
10585 static void
10586 do_mav_triple (void)
10587 {
10588 inst.instruction |= inst.operands[0].reg << 16;
10589 inst.instruction |= inst.operands[1].reg;
10590 inst.instruction |= inst.operands[2].reg << 12;
10591 }
10592
10593 /* Insns like "foo W,X,Y,Z".
10594 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10595
10596 static void
10597 do_mav_quad (void)
10598 {
10599 inst.instruction |= inst.operands[0].reg << 5;
10600 inst.instruction |= inst.operands[1].reg << 12;
10601 inst.instruction |= inst.operands[2].reg << 16;
10602 inst.instruction |= inst.operands[3].reg;
10603 }
10604
10605 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
static void
do_mav_dspsc (void)
{
  /* Only the MVDX source register (operand 1) is encoded; the DSPSC
     operand (operand 0) has no register field of its own.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
10611
10612 /* Maverick shift immediate instructions.
10613 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10614 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10615
10616 static void
10617 do_mav_shift (void)
10618 {
10619 int imm = inst.operands[2].imm;
10620
10621 inst.instruction |= inst.operands[0].reg << 12;
10622 inst.instruction |= inst.operands[1].reg << 16;
10623
10624 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10625 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10626 Bit 4 should be 0. */
10627 imm = (imm & 0xf) | ((imm & 0x70) << 1);
10628
10629 inst.instruction |= imm;
10630 }
10631 \f
10632 /* XScale instructions. Also sorted arithmetic before move. */
10633
10634 /* Xscale multiply-accumulate (argument parse)
10635 MIAcc acc0,Rm,Rs
10636 MIAPHcc acc0,Rm,Rs
10637 MIAxycc acc0,Rm,Rs. */
10638
10639 static void
10640 do_xsc_mia (void)
10641 {
10642 inst.instruction |= inst.operands[1].reg;
10643 inst.instruction |= inst.operands[2].reg << 12;
10644 }
10645
10646 /* Xscale move-accumulator-register (argument parse)
10647
10648 MARcc acc0,RdLo,RdHi. */
10649
10650 static void
10651 do_xsc_mar (void)
10652 {
10653 inst.instruction |= inst.operands[1].reg << 12;
10654 inst.instruction |= inst.operands[2].reg << 16;
10655 }
10656
10657 /* Xscale move-register-accumulator (argument parse)
10658
10659 MRAcc RdLo,RdHi,acc0. */
10660
10661 static void
10662 do_xsc_mra (void)
10663 {
10664 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
10665 inst.instruction |= inst.operands[0].reg << 12;
10666 inst.instruction |= inst.operands[1].reg << 16;
10667 }
10668 \f
10669 /* Encoding functions relevant only to Thumb. */
10670
10671 /* inst.operands[i] is a shifted-register operand; encode
10672 it into inst.instruction in the format used by Thumb32. */
10673
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX uses the ROR type code with no shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* A zero amount canonicalizes to LSL #0; an amount of 32 (for
	 the shifts that allow it) is encoded as 0.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      /* Split the amount: bits 2-4 into insn bits 12-14, bits 0-1
	 into insn bits 6-7.  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10705
10706
10707 /* inst.operands[i] was set up by parse_address. Encode it into a
10708 Thumb32 format load or store instruction. Reject forms that cannot
10709 be used with such instructions. If is_t is true, reject forms that
10710 cannot be used with a T instruction; if is_d is true, reject forms
10711 that cannot be used with a D instruction. If it is a store insn,
10712 reject PC in Rn. */
10713
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #shift}].  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  /* The LSL amount must be a constant in the range 0-3.  */
	  constraint (inst.relocs[0].exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.relocs[0].exp.X_add_number < 0
		      || inst.relocs[0].exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
	}
      inst.relocs[0].type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Offset or pre-indexed form: [Rn {, #imm}]{!}.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed form: [Rn], #imm (writeback is implied).  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10785
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.
   Each X() entry gives the mnemonic tag, then the 16-bit opcode, then
   the 32-bit opcode (ffffffff where no 32-bit encoding exists).  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bf,    0000, f040e001),			\
  X(_bfcsel,0000, f000e001),			\
  X(_bfx,   0000, f060e001),			\
  X(_bfl,   0000, f000c001),			\
  X(_bflx,  0000, f070e001),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_dls,   0000, f040e001),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_le,    0000, f00fc001),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_wls,   0000, f040c001),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10899
10900 /* Thumb instruction encoders, in alphabetical order. */
10901
10902 /* ADDW or SUBW. */
10903
static void
do_t_add_sub_w (void)
{
  int Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
     is the SP-{plus,minus}-immediate form of the instruction.  */
  if (Rn == REG_SP)
    constraint (Rd == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rd);

  inst.instruction |= (Rn << 16) | (Rd << 8);
  /* The 12-bit immediate is handled via this relocation/fixup.  */
  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
}
10922
10923 /* Parse an add or subtract instruction. We get here with inst.instruction
10924 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
10925
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* Outside an IT/predication block only the flag-setting forms
	 may be narrow; inside, only the non-flag-setting forms.  */
      if (flags)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate operand.  */
	  int add;

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* Thumb-1 group relocations force the 16-bit form and
		     keep their own reloc type.  */
		  if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || (inst.relocs[0].type
			  > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC))
		    {
		      if (inst.size_req == 2)
			inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
		      else
			inst.relax = opcode;
		    }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Writing the PC is only legal as SUBS PC, LR, #imm8
		     (exception return).  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.relocs[0].exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.relocs[0].exp.X_add_number < 0
			      || inst.relocs[0].exp.X_add_number > 0xff,
			     _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.relocs[0].exp.X_add_number;
		  inst.relocs[0].type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register (possibly shifted) operand.  */
	  unsigned int value = inst.relocs[0].exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (divided) syntax: 16-bit encodings only.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
11146
static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  /* With no explicit size requirement and a low destination register,
     start narrow and let relaxation widen if necessary.  */
  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.relocs[0].pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
      inst.relocs[0].exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.relocs[0].pc_rel = 1;
      inst.instruction |= Rd << 4;
    }

  /* For a defined Thumb function symbol, add one so the computed
     address has its low (Thumb state) bit set.  */
  if (inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number += 1;
}
11186
/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.  */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  /* The 16-bit form needs low registers, an unshifted operand,
	     and no explicit ".w" size request.  */
	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
11275
/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   accepted.  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  /* The 16-bit form needs low registers, an unshifted operand,
	     and no explicit ".w" size request.  */
	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutativity: the destination may match either source.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
11376
11377 static void
11378 do_t_bfc (void)
11379 {
11380 unsigned Rd;
11381 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
11382 constraint (msb > 32, _("bit-field extends past end of register"));
11383 /* The instruction encoding stores the LSB and MSB,
11384 not the LSB and width. */
11385 Rd = inst.operands[0].reg;
11386 reject_bad_reg (Rd);
11387 inst.instruction |= Rd << 8;
11388 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
11389 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
11390 inst.instruction |= msb - 1;
11391 }
11392
11393 static void
11394 do_t_bfi (void)
11395 {
11396 int Rd, Rn;
11397 unsigned int msb;
11398
11399 Rd = inst.operands[0].reg;
11400 reject_bad_reg (Rd);
11401
11402 /* #0 in second position is alternative syntax for bfc, which is
11403 the same instruction but with REG_PC in the Rm field. */
11404 if (!inst.operands[1].isreg)
11405 Rn = REG_PC;
11406 else
11407 {
11408 Rn = inst.operands[1].reg;
11409 reject_bad_reg (Rn);
11410 }
11411
11412 msb = inst.operands[2].imm + inst.operands[3].imm;
11413 constraint (msb > 32, _("bit-field extends past end of register"));
11414 /* The instruction encoding stores the LSB and MSB,
11415 not the LSB and width. */
11416 inst.instruction |= Rd << 8;
11417 inst.instruction |= Rn << 16;
11418 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11419 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11420 inst.instruction |= msb - 1;
11421 }
11422
11423 static void
11424 do_t_bfx (void)
11425 {
11426 unsigned Rd, Rn;
11427
11428 Rd = inst.operands[0].reg;
11429 Rn = inst.operands[1].reg;
11430
11431 reject_bad_reg (Rd);
11432 reject_bad_reg (Rn);
11433
11434 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
11435 _("bit-field extends past end of register"));
11436 inst.instruction |= Rd << 8;
11437 inst.instruction |= Rn << 16;
11438 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11439 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11440 inst.instruction |= inst.operands[3].imm - 1;
11441 }
11442
11443 /* ARM V5 Thumb BLX (argument parse)
11444 BLX <target_addr> which is BLX(1)
11445 BLX <Rm> which is BLX(2)
11446 Unfortunately, there are two different opcodes for this mnemonic.
11447 So, the insns[].value is not used, and the code here zaps values
11448 into inst.instruction.
11449
11450 ??? How to take advantage of the additional two bits of displacement
11451 available in Thumb32 mode? Need new relocation? */
11452
11453 static void
11454 do_t_blx (void)
11455 {
11456 set_pred_insn_type_last ();
11457
11458 if (inst.operands[0].isreg)
11459 {
11460 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
11461 /* We have a register, so this is BLX(2). */
11462 inst.instruction |= inst.operands[0].reg << 3;
11463 }
11464 else
11465 {
11466 /* No register. This must be BLX(1). */
11467 inst.instruction = 0xf000e800;
11468 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
11469 }
11470 }
11471
/* Thumb branch (B / B<cond>, argument parse).  Selects between the
   16-bit and 32-bit encodings and the matching PC-relative reloc.
   Conditional branches inside an IT block are emitted unconditional;
   the block supplies the condition.  */
static void
do_t_branch (void)
{
  int opcode;
  int cond;
  bfd_reloc_code_real_type reloc;

  cond = inst.cond;
  set_pred_insn_type (IF_INSIDE_IT_LAST_INSN);

  if (in_pred_block ())
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
	 branches.  */
      cond = COND_ALWAYS;
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  if (unified_syntax
      && (inst.size_req == 4
	  || (inst.size_req != 2
	      && (inst.operands[0].hasreloc
		  || inst.relocs[0].exp.X_op == O_constant))))
    {
      /* Wide form: either requested explicitly, or the target needs a
	 relocation / is a constant.  */
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
		      _("selected architecture does not support "
			"wide conditional branch instruction"));

	  gas_assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      /* Narrow form; relocs differ for conditional vs unconditional.  */
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }
  inst.relocs[0].type = reloc;
  inst.relocs[0].pc_rel = 1;
}
11533
11534 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11535 between the two is the maximum immediate allowed - which is passed in
11536 RANGE. */
11537 static void
11538 do_t_bkpt_hlt1 (int range)
11539 {
11540 constraint (inst.cond != COND_ALWAYS,
11541 _("instruction is always unconditional"));
11542 if (inst.operands[0].present)
11543 {
11544 constraint (inst.operands[0].imm > range,
11545 _("immediate value out of range"));
11546 inst.instruction |= inst.operands[0].imm;
11547 }
11548
11549 set_pred_insn_type (NEUTRAL_IT_INSN);
11550 }
11551
/* Thumb HLT: immediate fits in 6 bits.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (0x3f);
}
11557
/* Thumb BKPT: immediate fits in 8 bits.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (0xff);
}
11563
/* Thumb BL/BLX-style branch with 23-bit range (argument parse).  */
static void
do_t_branch23 (void)
{
  set_pred_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.relocs[0].type == BFD_RELOC_ARM_PLT32)
    inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_symbol
      = find_real_start (inst.relocs[0].exp.X_add_symbol);
#endif
}
11591
11592 static void
11593 do_t_bx (void)
11594 {
11595 set_pred_insn_type_last ();
11596 inst.instruction |= inst.operands[0].reg << 3;
11597 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11598 should cause the alignment to be checked once it is known. This is
11599 because BX PC only works if the instruction is word aligned. */
11600 }
11601
11602 static void
11603 do_t_bxj (void)
11604 {
11605 int Rm;
11606
11607 set_pred_insn_type_last ();
11608 Rm = inst.operands[0].reg;
11609 reject_bad_reg (Rm);
11610 inst.instruction |= Rm << 16;
11611 }
11612
11613 static void
11614 do_t_clz (void)
11615 {
11616 unsigned Rd;
11617 unsigned Rm;
11618
11619 Rd = inst.operands[0].reg;
11620 Rm = inst.operands[1].reg;
11621
11622 reject_bad_reg (Rd);
11623 reject_bad_reg (Rm);
11624
11625 inst.instruction |= Rd << 8;
11626 inst.instruction |= Rm << 16;
11627 inst.instruction |= Rm;
11628 }
11629
/* Thumb CSDB (argument parse): no operands; just record that this
   insn sits outside any predication block.  */
static void
do_t_csdb (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
}
11635
/* Thumb CPS (argument parse): OR the parsed immediate into the
   opcode.  */
static void
do_t_cps (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction |= inst.operands[0].imm;
}
11642
/* Thumb CPS<effect> with interrupt flags and optional mode (argument
   parse).  The two-argument form and forced-wide requests take the
   32-bit encoding, available with v6 non-M-profile.  */
static void
do_t_cpsi (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Rebuild as the 32-bit encoding, carrying over the imod field
	 parsed into the 16-bit template.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
11670
11671 /* THUMB CPY instruction (argument parse). */
11672
11673 static void
11674 do_t_cpy (void)
11675 {
11676 if (inst.size_req == 4)
11677 {
11678 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11679 inst.instruction |= inst.operands[0].reg << 8;
11680 inst.instruction |= inst.operands[1].reg;
11681 }
11682 else
11683 {
11684 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11685 inst.instruction |= (inst.operands[0].reg & 0x7);
11686 inst.instruction |= inst.operands[1].reg << 3;
11687 }
11688 }
11689
11690 static void
11691 do_t_cbz (void)
11692 {
11693 set_pred_insn_type (OUTSIDE_PRED_INSN);
11694 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11695 inst.instruction |= inst.operands[0].reg;
11696 inst.relocs[0].pc_rel = 1;
11697 inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11698 }
11699
/* Thumb-2 DBG hint (argument parse): the option immediate goes in
   the low bits of the opcode.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
11705
11706 static void
11707 do_t_div (void)
11708 {
11709 unsigned Rd, Rn, Rm;
11710
11711 Rd = inst.operands[0].reg;
11712 Rn = (inst.operands[1].present
11713 ? inst.operands[1].reg : Rd);
11714 Rm = inst.operands[2].reg;
11715
11716 reject_bad_reg (Rd);
11717 reject_bad_reg (Rn);
11718 reject_bad_reg (Rm);
11719
11720 inst.instruction |= Rd << 8;
11721 inst.instruction |= Rn << 16;
11722 inst.instruction |= Rm;
11723 }
11724
11725 static void
11726 do_t_hint (void)
11727 {
11728 if (unified_syntax && inst.size_req == 4)
11729 inst.instruction = THUMB_OP32 (inst.instruction);
11730 else
11731 inst.instruction = THUMB_OP16 (inst.instruction);
11732 }
11733
/* Thumb IT instruction (argument parse).  Record the new IT block
   state in now_pred and fix up the encoded mask: when the condition
   code is even (the negated form), the then/else bits of the mask --
   those above the terminating 1 bit -- are inverted to match.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_pred_insn_type (IT_INSN);
  now_pred.mask = (inst.instruction & 0xf) | 0x10;
  now_pred.cc = cond;
  now_pred.warn_deprecated = FALSE;
  now_pred.type = SCALAR_PRED;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The position of the lowest set bit gives the block length;
	 the bits above it are the ones to flip.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_pred.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_pred.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_pred.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_pred.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
11777
/* MVE VPT block start (argument parse).  Record the vector-predication
   state; the mask is gathered from instruction bit 22 and bits
   15..13.  */
static void
do_mve_vpt (void)
{
  /* We are dealing with a vector predicated block.  */
  set_pred_insn_type (VPT_INSN);
  now_pred.cc = 0;
  now_pred.mask = ((inst.instruction & 0x00400000) >> 19)
		  | ((inst.instruction & 0xe000) >> 13);
  now_pred.warn_deprecated = FALSE;
  now_pred.type = VECTOR_PRED;
}
11789
/* Helper function used for both push/pop and ldm/stm.  DO_IO says
   whether the insn really transfers memory (the base-register checks
   and the single-register rewrite only apply then).  BASE is the base
   register (must be valid unless !DO_IO), MASK the register list,
   WRITEBACK whether base writeback was requested.  */
static void
encode_thumb2_multi (bfd_boolean do_io, int base, unsigned mask,
		     bfd_boolean writeback)
{
  bfd_boolean load, store;

  gas_assert (base != -1 || !do_io);
  load = do_io && ((inst.instruction & (1 << 20)) != 0);
  store = do_io && !load;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if (do_io && (mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC is a branch, so it must end any pred block.  */
	    set_pred_insn_type_last ();
	}
    }
  else if (store)
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if (do_io && ((mask & (mask - 1)) == 0))
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* The single register goes in the Rt field (bits 15..12).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  if (do_io)
    inst.instruction |= base << 16;
}
11857
/* Thumb LDM/STM (argument parse).  Tries hard to use a 16-bit
   encoding -- rewriting single-register lists as ldr/str and SP-based
   forms as push/pop -- before falling back to the 32-bit encoding via
   encode_thumb2_multi.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* The 16-bit stmia always writes back; the 16-bit ldmia
		 writes back iff the base is not in the list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP base: rewrite as push/pop, or as an SP-relative
		 str/ldr for a single-register list.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
		    THUMB_OP16 (inst.instruction == T_MNEM_stmia
				? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
		    THUMB_OP16 (inst.instruction == T_MNEM_stmia
				? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_multi (TRUE /* do_io */, inst.operands[0].reg,
			       inst.operands[1].imm,
			       inst.operands[0].writeback);
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11986
/* Thumb-2 LDREX (argument parse).  Only the pre-indexed,
   immediate-offset, non-writeback addressing form is accepted.  */
static void
do_t_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative,
	      BAD_ADDR_MODE);

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
12002
12003 static void
12004 do_t_ldrexd (void)
12005 {
12006 if (!inst.operands[1].present)
12007 {
12008 constraint (inst.operands[0].reg == REG_LR,
12009 _("r14 not allowed as first register "
12010 "when second register is omitted"));
12011 inst.operands[1].reg = inst.operands[0].reg + 1;
12012 }
12013 constraint (inst.operands[0].reg == inst.operands[1].reg,
12014 BAD_OVERLAP);
12015
12016 inst.instruction |= inst.operands[0].reg << 12;
12017 inst.instruction |= inst.operands[1].reg << 8;
12018 inst.instruction |= inst.operands[2].reg << 16;
12019 }
12020
/* Thumb single-register load/store (argument parse).  Handles literal
   pools, the 16-bit special forms (PC/SP-relative, register offset,
   immediate offset) and falls back to the 32-bit encoding.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    /* A load that writes PC is a branch: it must end a pred block.  */
    set_pred_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand: try a literal-pool entry / mov rewrite.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- or SP-relative special opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.relocs[0].pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Pre-unified (divided) syntax from here on: 16-bit only.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert the immediate-offset template opcode to the matching
     register-offset opcode.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
12207
/* Thumb-2 doubleword load/store (argument parse).  An omitted second
   transfer register defaults to the successor of the first.  */
static void
do_t_ldstd (void)
{
  if (!inst.operands[1].present)
    {
      inst.operands[1].reg = inst.operands[0].reg + 1;
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed here"));
      constraint (inst.operands[0].reg == REG_R12,
		  _("r12 not allowed here"));
    }

  if (inst.operands[2].writeback
      && (inst.operands[0].reg == inst.operands[2].reg
	  || inst.operands[1].reg == inst.operands[2].reg))
    as_warn (_("base register written back, and overlaps "
	       "one of transfer registers"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
}
12230
/* Encode a Thumb-2 LDRT/STRT-style unprivileged ("translated")
   load/store: fill in Rt and defer to the common address encoder.  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
12237
12238 static void
12239 do_t_mla (void)
12240 {
12241 unsigned Rd, Rn, Rm, Ra;
12242
12243 Rd = inst.operands[0].reg;
12244 Rn = inst.operands[1].reg;
12245 Rm = inst.operands[2].reg;
12246 Ra = inst.operands[3].reg;
12247
12248 reject_bad_reg (Rd);
12249 reject_bad_reg (Rn);
12250 reject_bad_reg (Rm);
12251 reject_bad_reg (Ra);
12252
12253 inst.instruction |= Rd << 8;
12254 inst.instruction |= Rn << 16;
12255 inst.instruction |= Rm;
12256 inst.instruction |= Ra << 12;
12257 }
12258
12259 static void
12260 do_t_mlal (void)
12261 {
12262 unsigned RdLo, RdHi, Rn, Rm;
12263
12264 RdLo = inst.operands[0].reg;
12265 RdHi = inst.operands[1].reg;
12266 Rn = inst.operands[2].reg;
12267 Rm = inst.operands[3].reg;
12268
12269 reject_bad_reg (RdLo);
12270 reject_bad_reg (RdHi);
12271 reject_bad_reg (Rn);
12272 reject_bad_reg (Rm);
12273
12274 inst.instruction |= RdLo << 12;
12275 inst.instruction |= RdHi << 8;
12276 inst.instruction |= Rn << 16;
12277 inst.instruction |= Rm;
12278 }
12279
/* Encode Thumb MOV/MOVS/CMP.  operands[0] is Rd (or Rn for CMP) and
   operands[1] is a register (possibly shifted) or an immediate.

   In unified syntax this selects between 16-bit and 32-bit encodings
   ("narrow" vs wide), rewrites register-shifted MOVs into the
   corresponding shift instructions, and emits the various special
   cases (MOVS PC,LR; high-register MOV; CMP with high regs).  In
   pre-unified syntax only the 16-bit forms are produced.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* Writing PC ends an IT block.  */
  if (Rn == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      /* Destination register field position: bit 8 for MOV forms,
	 bit 16 for CMP.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      /* Inside an IT block the 16-bit MOVS is unavailable (it always
	 sets flags); outside, MOVS needs low registers to be narrow.  */
      if (in_pred_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				   "deprecated when r%u is the destination "
				   "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		    constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_pred_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      /* Thumb-1 ALU-group relocations keep BFD_RELOC_UNUSED
		 behaviour; everything else gets a Thumb immediate
		 reloc or is left for relaxation.  */
	      if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		  || inst.relocs[0].type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		{
		  if (inst.size_req == 2)
		    inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
		  else
		    inst.relax = opcode;
		}
	    }
	  else
	    {
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_pred_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  /* 16-bit shift-by-register encodings require Rd == Rm.  */
	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_pred_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		/* High-register CMP splits Rn across two fields.  */
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Pre-unified (divided) syntax: only 16-bit encodings below.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
12579
/* Encode Thumb-2 MOVW/MOVT (16-bit immediate move).  Bit 23 of the
   opcode distinguishes MOVT (top) from MOVW.  :lower16:/:upper16:
   relocations are converted to their Thumb equivalents; a plain
   constant is split across the i:imm4:imm3:imm8 fields.  */
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  top = (inst.instruction & 0x00800000) != 0;
  if (inst.relocs[0].type == BFD_RELOC_ARM_MOVW)
    {
      /* :lower16: only makes sense on MOVW.  */
      constraint (top, _(":lower16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.relocs[0].type == BFD_RELOC_ARM_MOVT)
    {
      /* :upper16: only makes sense on MOVT.  */
      constraint (!top, _(":upper16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      /* No relocation needed: scatter the 16-bit constant into the
	 imm4 (bits 16-19), i (bit 26), imm3 (bits 12-14) and imm8
	 fields.  */
      imm = inst.relocs[0].exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
12612
/* Encode Thumb MVN/MVNS/TST/CMN/CMP-style one-source ALU operations.
   operands[0] is Rn (destination for MVN forms), operands[1] is a
   register (possibly shifted) or an immediate.  Chooses between
   16-bit and 32-bit encodings in unified syntax; pre-unified syntax
   only allows the unshifted low-register 16-bit form.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN allow SP as Rn, so only PC is rejected; the other
     mnemonics reject both SP and PC.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* Destination field position: bit 8 for MVN forms, bit 16 for
	 the compare/test forms.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: only the 16-bit two-register form.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12692
/* Encode Thumb-2 MRS (move from status register).  operands[0] is Rd;
   operands[1] is either a banked register (isreg) or a PSR mask
   immediate.  VFP VMRS aliases are handled first by do_vfp_nsyn_mrs.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register form: the operand carries pre-packed field
	 bits (SYSm, R bit).  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
						   "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12740
/* Encode Thumb-2 MSR (move to status register).  operands[0] is the
   PSR specifier (a banked register or a mask immediate), operands[1]
   must be a core register — Thumb MSR has no immediate form.  VFP
   VMSR aliases are handled first by do_vfp_nsyn_msr.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      /* With the DSP extension both the s and f masks are valid;
	 without it only the f mask is.  */
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* Scatter the pre-packed specifier bits into the R, mask and SYSm
     fields, then add the source register.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12787
/* Encode Thumb MUL/MULS.  operands[0] is Rd, operands[1] is Rn and
   operands[2] (optional, defaults to Rd) is Rm.  The 16-bit encoding
   requires low registers and Rd equal to one of the sources; otherwise
   the 32-bit MUL (which must not set flags) is used.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* "mul Rd, Rm" is shorthand for "mul Rd, Rn, Rd".  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      /* Rd must overlap a source; the other source goes into bits 3-5.  */
      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
12850
12851 static void
12852 do_t_mull (void)
12853 {
12854 unsigned RdLo, RdHi, Rn, Rm;
12855
12856 RdLo = inst.operands[0].reg;
12857 RdHi = inst.operands[1].reg;
12858 Rn = inst.operands[2].reg;
12859 Rm = inst.operands[3].reg;
12860
12861 reject_bad_reg (RdLo);
12862 reject_bad_reg (RdHi);
12863 reject_bad_reg (Rn);
12864 reject_bad_reg (Rm);
12865
12866 inst.instruction |= RdLo << 12;
12867 inst.instruction |= RdHi << 8;
12868 inst.instruction |= Rn << 16;
12869 inst.instruction |= Rm;
12870
12871 if (RdLo == RdHi)
12872 as_tsktsk (_("rdhi and rdlo must be different"));
12873 }
12874
/* Encode Thumb NOP and its hint variants.  A hint number above 15, or
   an explicit .w qualifier, forces the 32-bit encoding; otherwise the
   16-bit hint encoding is used when Thumb-2 is available, with a
   plain "mov r8, r8" NOP (0x46c0) as the final fallback.  */
static void
do_t_nop (void)
{
  /* NOPs are allowed anywhere relative to an IT block.  */
  set_pred_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
12907
/* Encode Thumb NEG/NEGS (operands[0] = Rd, operands[1] = Rm).  The
   16-bit form requires low registers and the right flag-setting
   behaviour for the current IT context; otherwise the 32-bit form is
   emitted (unified syntax only).  */
static void
do_t_neg (void)
{
  if (unified_syntax)
    {
      bfd_boolean narrow;

      /* The 16-bit encoding always sets flags, so it is only usable
	 when the flag-setting variant is requested outside an IT
	 block (or the non-setting variant inside one).  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].reg << 16;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
12948
/* Encode Thumb-2 ORN/ORNS (OR NOT).  operands[0] is Rd, operands[1]
   (optional, defaults to Rd) is Rn, operands[2] is a shifted register
   or an immediate.  32-bit only.  */
static void
do_t_orn (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      /* Immediate form: flip to the T32 modified-immediate layout.  */
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      constraint (inst.operands[2].shifted
		  && inst.operands[2].immisreg,
		  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
12982
/* Encode Thumb-2 PKHBT (pack halfword, bottom/top).  operands[0-2]
   are Rd, Rn, Rm; operands[3] is an optional shift whose constant
   amount is split across the imm3 and imm2 fields.  */
static void
do_t_pkhbt (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (inst.operands[3].present)
    {
      unsigned int val = inst.relocs[0].exp.X_add_number;
      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));
      /* Shift amount: bits 4:2 go to imm3 (bits 12-14), bits 1:0 to
	 imm2 (bits 6-7).  */
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}
13008
/* Encode Thumb-2 PKHTB.  Without a shift operand PKHTB Rd, Rn, Rm is
   the same as PKHBT Rd, Rm, Rn with the tb bit cleared, so swap the
   sources and reuse do_t_pkhbt.  */
static void
do_t_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      unsigned Rtmp;

      /* Clear the tb selector bit.  */
      inst.instruction &= ~0x00000020;

      /* PR 10168.  Swap the Rm and Rn registers.  */
      Rtmp = inst.operands[1].reg;
      inst.operands[1].reg = inst.operands[2].reg;
      inst.operands[2].reg = Rtmp;
    }
  do_t_pkhbt ();
}
13025
/* Encode Thumb-2 PLD/PLI-style memory hints.  The single operand is
   an address; a register offset may not be r13 or r15.  */
static void
do_t_pld (void)
{
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
13034
/* Encode Thumb PUSH/POP.  Prefers the 16-bit encodings: plain low
   registers, or low registers plus LR (push) / PC (pop); everything
   else requires the 32-bit LDM/STM form available only in unified
   syntax.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    /* Only low registers: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
					? REG_LR : REG_PC)))
    {
      /* Low registers plus LR (push) or PC (pop): 16-bit encoding
	 with the extra-register bit set.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_multi (TRUE /* do_io */, 13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
13067
/* Encode CLRM (clear multiple registers, Armv8.1-M Mainline).  Only
   available in unified syntax; reuses the multi-register encoder with
   no base register.  */
static void
do_t_clrm (void)
{
  if (unified_syntax)
    encode_thumb2_multi (FALSE /* do_io */, -1, inst.operands[0].imm, FALSE);
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
13079
/* Encode VSCCLRM (Armv8.1-M Mainline FP register clearing).  The
   operand carries the first register and the list length (imm); the
   single- and double-precision forms place the register number bits
   in different fields, and the D form stores the length doubled with
   bit 8 set.  */
static void
do_t_vscclrm (void)
{
  if (inst.operands[0].issingle)
    {
      /* S registers: low bit of the register number goes to bit 22.  */
      inst.instruction |= (inst.operands[0].reg & 0x1) << 22;
      inst.instruction |= (inst.operands[0].reg & 0x1e) << 11;
      inst.instruction |= inst.operands[0].imm;
    }
  else
    {
      /* D registers: high bit of the register number goes to bit 22.  */
      inst.instruction |= (inst.operands[0].reg & 0x10) << 18;
      inst.instruction |= (inst.operands[0].reg & 0xf) << 12;
      inst.instruction |= 1 << 8;
      inst.instruction |= inst.operands[0].imm << 1;
    }
}
13097
13098 static void
13099 do_t_rbit (void)
13100 {
13101 unsigned Rd, Rm;
13102
13103 Rd = inst.operands[0].reg;
13104 Rm = inst.operands[1].reg;
13105
13106 reject_bad_reg (Rd);
13107 reject_bad_reg (Rm);
13108
13109 inst.instruction |= Rd << 8;
13110 inst.instruction |= Rm << 16;
13111 inst.instruction |= Rm;
13112 }
13113
/* Encode Thumb REV/REV16/REVSH (byte-reverse).  Rd = operands[0],
   Rm = operands[1].  Low registers get the 16-bit encoding unless .w
   was requested; the 32-bit encoding (unified syntax only) duplicates
   Rm into both source fields.  */
static void
do_t_rev (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
13142
13143 static void
13144 do_t_rrx (void)
13145 {
13146 unsigned Rd, Rm;
13147
13148 Rd = inst.operands[0].reg;
13149 Rm = inst.operands[1].reg;
13150
13151 reject_bad_reg (Rd);
13152 reject_bad_reg (Rm);
13153
13154 inst.instruction |= Rd << 8;
13155 inst.instruction |= Rm;
13156 }
13157
/* Encode Thumb-2 RSB/RSBS (reverse subtract).  operands[0] is Rd,
   operands[1] (optional, defaults to Rd) is Rs, operands[2] is a
   register or an immediate.  "rsbs Rd, Rs, #0" with low registers can
   shrink to the 16-bit NEGS encoding.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit; the narrow NEGS encoding
	 always sets flags, so the IT context decides.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      if (inst.relocs[0].exp.X_op != O_constant
	  || inst.relocs[0].exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.relocs[0].type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
13212
13213 static void
13214 do_t_setend (void)
13215 {
13216 if (warn_on_deprecated
13217 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
13218 as_tsktsk (_("setend use is deprecated for ARMv8"));
13219
13220 set_pred_insn_type (OUTSIDE_PRED_INSN);
13221 if (inst.operands[0].imm)
13222 inst.instruction |= 0x8;
13223 }
13224
/* Encode Thumb shift instructions (ASR/LSL/LSR/ROR and their
   flag-setting variants).  operands[0] is Rd, operands[1] (optional,
   defaults to Rd) is the shifted register, operands[2] is the shift
   amount (register or immediate).  Selects between the 16-bit
   encodings (low regs, right flag behaviour for the IT context) and
   the 32-bit forms; immediate wide shifts are emitted as MOV/MOVS
   with a shifted operand.  */
static void
do_t_shift (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* 16-bit shifts always set flags, so the IT context decides
	 which variant can be narrow.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit ROR-by-immediate.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 32-bit shift-by-register.  */
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* 32-bit shift-by-immediate: encode as MOV/MOVS with a
		 shifted register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.relocs[0].type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: 16-bit encodings only.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
13372
13373 static void
13374 do_t_simd (void)
13375 {
13376 unsigned Rd, Rn, Rm;
13377
13378 Rd = inst.operands[0].reg;
13379 Rn = inst.operands[1].reg;
13380 Rm = inst.operands[2].reg;
13381
13382 reject_bad_reg (Rd);
13383 reject_bad_reg (Rn);
13384 reject_bad_reg (Rm);
13385
13386 inst.instruction |= Rd << 8;
13387 inst.instruction |= Rn << 16;
13388 inst.instruction |= Rm;
13389 }
13390
13391 static void
13392 do_t_simd2 (void)
13393 {
13394 unsigned Rd, Rn, Rm;
13395
13396 Rd = inst.operands[0].reg;
13397 Rm = inst.operands[1].reg;
13398 Rn = inst.operands[2].reg;
13399
13400 reject_bad_reg (Rd);
13401 reject_bad_reg (Rn);
13402 reject_bad_reg (Rm);
13403
13404 inst.instruction |= Rd << 8;
13405 inst.instruction |= Rn << 16;
13406 inst.instruction |= Rm;
13407 }
13408
/* Encode the Thumb-2 SMC (Secure Monitor Call) instruction and its
   16-bit immediate.  */
static void
do_t_smc (void)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  /* The immediate is consumed here, so no fixup is left behind.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
  /* Scatter the 16-bit value across the encoding: imm[15:12] into bits
     [3:0], imm[11:4] kept in place at bits [11:4], imm[3:0] into bits
     [19:16].  */
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_pred_insn_type_last ();
}
13424
13425 static void
13426 do_t_hvc (void)
13427 {
13428 unsigned int value = inst.relocs[0].exp.X_add_number;
13429
13430 inst.relocs[0].type = BFD_RELOC_UNUSED;
13431 inst.instruction |= (value & 0x0fff);
13432 inst.instruction |= (value & 0xf000) << 4;
13433 }
13434
/* Common encoder for the Thumb-2 SSAT/USAT instructions.  BIAS is
   subtracted from the saturation-position immediate before encoding:
   1 for SSAT, 0 for USAT.  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  /* Operand 3 is the optional shift applied to Rn.  */
  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.relocs[0].exp.X_add_number;

      inst.relocs[0].type = BFD_RELOC_UNUSED;

      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  /* ASR (as opposed to LSL) is selected by the "sh" bit.  */
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* The 5-bit shift amount is split: bits [4:2] go into the
	     imm3 field (instruction bits [14:12]) and bits [1:0] into
	     the imm2 field (instruction bits [7:6]).  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
13472
static void
do_t_ssat (void)
{
  /* SSAT uses bias 1: the saturate-to position is encoded as imm - 1.  */
  do_t_ssat_usat (1);
}
13478
13479 static void
13480 do_t_ssat16 (void)
13481 {
13482 unsigned Rd, Rn;
13483
13484 Rd = inst.operands[0].reg;
13485 Rn = inst.operands[2].reg;
13486
13487 reject_bad_reg (Rd);
13488 reject_bad_reg (Rn);
13489
13490 inst.instruction |= Rd << 8;
13491 inst.instruction |= inst.operands[1].imm - 1;
13492 inst.instruction |= Rn << 16;
13493 }
13494
/* Encode STREX Rd, Rt, [Rn {, #imm}].  Only a plain immediate-offset
   addressing mode (pre-indexed, no writeback, no register offset, no
   shift, no negative offset) is accepted.  */
static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset immediate is applied later through this fixup.  */
  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
13511
13512 static void
13513 do_t_strexd (void)
13514 {
13515 if (!inst.operands[2].present)
13516 inst.operands[2].reg = inst.operands[1].reg + 1;
13517
13518 constraint (inst.operands[0].reg == inst.operands[1].reg
13519 || inst.operands[0].reg == inst.operands[2].reg
13520 || inst.operands[0].reg == inst.operands[3].reg,
13521 BAD_OVERLAP);
13522
13523 inst.instruction |= inst.operands[0].reg;
13524 inst.instruction |= inst.operands[1].reg << 12;
13525 inst.instruction |= inst.operands[2].reg << 8;
13526 inst.instruction |= inst.operands[3].reg << 16;
13527 }
13528
13529 static void
13530 do_t_sxtah (void)
13531 {
13532 unsigned Rd, Rn, Rm;
13533
13534 Rd = inst.operands[0].reg;
13535 Rn = inst.operands[1].reg;
13536 Rm = inst.operands[2].reg;
13537
13538 reject_bad_reg (Rd);
13539 reject_bad_reg (Rn);
13540 reject_bad_reg (Rm);
13541
13542 inst.instruction |= Rd << 8;
13543 inst.instruction |= Rn << 16;
13544 inst.instruction |= Rm;
13545 inst.instruction |= inst.operands[3].imm << 4;
13546 }
13547
/* Encode the Thumb extend instructions (SXTH and friends), selecting
   between the 16-bit and 32-bit encodings.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  /* Use the narrow encoding when the opcode has a 16-bit form, no
     explicit ".w" was given, both registers are low and there is no
     rotation.  */
  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      /* Widen to the 32-bit encoding if we are still holding a 16-bit
	 opcode.  */
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      inst.instruction |= inst.operands[2].imm << 4;  /* Rotation.  */
    }
  else
    {
      /* Divided (pre-UAL) syntax only has the 16-bit form: no rotation
	 and low registers only.  */
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
13583
static void
do_t_swi (void)
{
  /* The SWI/SVC comment field is filled in later via this relocation;
     nothing is encoded here directly.  */
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
}
13589
/* Encode the TBB/TBH table-branch instructions.  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  /* Bit 4 of the opcode distinguishes the halfword form (TBH) from the
     byte form (TBB).  */
  half = (inst.instruction & 0x10) != 0;
  set_pred_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* SP is permitted as the base register only from ARMv8 onwards.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only the halfword form takes a shifted index.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13612
13613 static void
13614 do_t_udf (void)
13615 {
13616 if (!inst.operands[0].present)
13617 inst.operands[0].imm = 0;
13618
13619 if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
13620 {
13621 constraint (inst.size_req == 2,
13622 _("immediate value out of range"));
13623 inst.instruction = THUMB_OP32 (inst.instruction);
13624 inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
13625 inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
13626 }
13627 else
13628 {
13629 inst.instruction = THUMB_OP16 (inst.instruction);
13630 inst.instruction |= inst.operands[0].imm;
13631 }
13632
13633 set_pred_insn_type (NEUTRAL_IT_INSN);
13634 }
13635
13636
static void
do_t_usat (void)
{
  /* USAT uses bias 0: the saturate-to position is encoded as-is.  */
  do_t_ssat_usat (0);
}
13642
13643 static void
13644 do_t_usat16 (void)
13645 {
13646 unsigned Rd, Rn;
13647
13648 Rd = inst.operands[0].reg;
13649 Rn = inst.operands[2].reg;
13650
13651 reject_bad_reg (Rd);
13652 reject_bad_reg (Rn);
13653
13654 inst.instruction |= Rd << 8;
13655 inst.instruction |= inst.operands[1].imm;
13656 inst.instruction |= Rn << 16;
13657 }
13658
13659 /* Checking the range of the branch offset (VAL) with NBITS bits
13660 and IS_SIGNED signedness. Also checks the LSB to be 0. */
13661 static int
13662 v8_1_branch_value_check (int val, int nbits, int is_signed)
13663 {
13664 gas_assert (nbits > 0 && nbits <= 32);
13665 if (is_signed)
13666 {
13667 int cmp = (1 << (nbits - 1));
13668 if ((val < -cmp) || (val >= cmp) || (val & 0x01))
13669 return FAIL;
13670 }
13671 else
13672 {
13673 if ((val <= 0) || (val >= (1 << nbits)) || (val & 0x1))
13674 return FAIL;
13675 }
13676 return SUCCESS;
13677 }
13678
/* For branches in Armv8.1-M Mainline.  Encodes the branch-future family
   (BF, BFL, BFCSEL, BFX, BFLX): operand 0 is the branch point, the
   remaining operands depend on the mnemonic.  */
static void
do_t_branch_future (void)
{
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);
  /* Operand 0: the 5-bit branch-point offset, common to all forms.  */
  if (inst.operands[0].hasreloc == 0)
    {
      if (v8_1_branch_value_check (inst.operands[0].imm, 5, FALSE) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      inst.instruction |= ((inst.operands[0].imm & 0x1f) >> 1) << 23;
    }
  else
    {
      /* Value not known yet; emit a fixup instead.  */
      inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH5;
      inst.relocs[0].pc_rel = 1;
    }

  switch (insn)
    {
    case T_MNEM_bf:
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 17, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  /* Split the 17-bit target offset into the immA (offset bits
	     [16:12]), immB ([11:2]) and immC ([1]) fields.  */
	  int immA = (val & 0x0001f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF17;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfl:
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 19, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  /* As for BF, but with a 19-bit offset (immA holds 7 bits).  */
	  int immA = (val & 0x0007f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF19;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfcsel:
      /* Operand 1.  */
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  int immA = (val & 0x00001000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF13;
	  inst.relocs[1].pc_rel = 1;
	}

      /* Operand 2.  Operands 0 and 2 must be resolved the same way:
	 either both carry relocations or neither does.  */
      if (inst.operands[2].hasreloc == 0)
	{
	  constraint ((inst.operands[0].hasreloc != 0), BAD_ARGS);
	  int val2 = inst.operands[2].imm;
	  int val0 = inst.operands[0].imm & 0x1f;
	  int diff = val2 - val0;
	  /* The else-target must lie 2 or 4 bytes beyond the branch
	     point; the 4-byte case is flagged via the T bit.  */
	  if (diff == 4)
	    inst.instruction |= 1 << 17; /* T bit.  */
	  else if (diff != 2)
	    as_bad (_("out of range label-relative fixup value"));
	}
      else
	{
	  constraint ((inst.operands[0].hasreloc == 0), BAD_ARGS);
	  inst.relocs[2].type = BFD_RELOC_THUMB_PCREL_BFCSEL;
	  inst.relocs[2].pc_rel = 1;
	}

      /* Operand 3: the condition code; the instruction itself must not
	 be conditional.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= (inst.operands[3].imm & 0xf) << 18;
      break;

    case T_MNEM_bfx:
    case T_MNEM_bflx:
      /* Register forms: the target address is in a register.  */
      inst.instruction |= inst.operands[1].reg << 16;
      break;

    default: abort ();
    }
}
13787
13788 /* Helper function for do_t_loloop to handle relocations. */
13789 static void
13790 v8_1_loop_reloc (int is_le)
13791 {
13792 if (inst.relocs[0].exp.X_op == O_constant)
13793 {
13794 int value = inst.relocs[0].exp.X_add_number;
13795 value = (is_le) ? -value : value;
13796
13797 if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
13798 as_bad (BAD_BRANCH_OFF);
13799
13800 int imml, immh;
13801
13802 immh = (value & 0x00000ffc) >> 2;
13803 imml = (value & 0x00000002) >> 1;
13804
13805 inst.instruction |= (imml << 11) | (immh << 1);
13806 }
13807 else
13808 {
13809 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_LOOP12;
13810 inst.relocs[0].pc_rel = 1;
13811 }
13812 }
13813
/* To handle the Scalar Low Overhead Loop instructions
   in Armv8.1-M Mainline: LE, WLS and DLS.  */
static void
do_t_loloop (void)
{
  unsigned long insn = inst.instruction;

  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction = THUMB_OP32 (inst.instruction);

  switch (insn)
    {
    case T_MNEM_le:
      /* le <label>.  Bit 21 marks the form without a loop-count
	 register.  */
      if (!inst.operands[0].present)
	inst.instruction |= 1 << 21;

      v8_1_loop_reloc (TRUE);
      break;

    case T_MNEM_wls:
      /* WLS additionally encodes the loop-end offset, then shares the
	 register encoding with DLS.  */
      v8_1_loop_reloc (FALSE);
      /* Fall through.  */
    case T_MNEM_dls:
      constraint (inst.operands[1].isreg != 1, BAD_ARGS);
      inst.instruction |= (inst.operands[1].reg << 16);
      break;

    default: abort();
    }
}
13845
13846 /* MVE instruction encoder helpers. */
13847 #define M_MNEM_vabav 0xee800f01
13848 #define M_MNEM_vmladav 0xeef00e00
13849 #define M_MNEM_vmladava 0xeef00e20
13850 #define M_MNEM_vmladavx 0xeef01e00
13851 #define M_MNEM_vmladavax 0xeef01e20
13852 #define M_MNEM_vmlsdav 0xeef00e01
13853 #define M_MNEM_vmlsdava 0xeef00e21
13854 #define M_MNEM_vmlsdavx 0xeef01e01
13855 #define M_MNEM_vmlsdavax 0xeef01e21
13856 #define M_MNEM_vmullt 0xee011e00
13857 #define M_MNEM_vmullb 0xee010e00
13858
13859 /* Neon instruction encoder helpers. */
13860
13861 /* Encodings for the different types for various Neon opcodes. */
13862
13863 /* An "invalid" code for the following tables. */
13864 #define N_INV -1u
13865
13866 struct neon_tab_entry
13867 {
13868 unsigned integer;
13869 unsigned float_or_poly;
13870 unsigned scalar_or_imm;
13871 };
13872
13873 /* Map overloaded Neon opcodes to their respective encodings. */
13874 #define NEON_ENC_TAB \
13875 X(vabd, 0x0000700, 0x1200d00, N_INV), \
13876 X(vabdl, 0x0800700, N_INV, N_INV), \
13877 X(vmax, 0x0000600, 0x0000f00, N_INV), \
13878 X(vmin, 0x0000610, 0x0200f00, N_INV), \
13879 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
13880 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
13881 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
13882 X(vadd, 0x0000800, 0x0000d00, N_INV), \
13883 X(vaddl, 0x0800000, N_INV, N_INV), \
13884 X(vsub, 0x1000800, 0x0200d00, N_INV), \
13885 X(vsubl, 0x0800200, N_INV, N_INV), \
13886 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
13887 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
13888 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
13889 /* Register variants of the following two instructions are encoded as
13890 vcge / vcgt with the operands reversed. */ \
13891 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
13892 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
13893 X(vfma, N_INV, 0x0000c10, N_INV), \
13894 X(vfms, N_INV, 0x0200c10, N_INV), \
13895 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
13896 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
13897 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
13898 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
13899 X(vmlal, 0x0800800, N_INV, 0x0800240), \
13900 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
13901 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
13902 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
13903 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
13904 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
13905 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
13906 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
13907 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
13908 X(vshl, 0x0000400, N_INV, 0x0800510), \
13909 X(vqshl, 0x0000410, N_INV, 0x0800710), \
13910 X(vand, 0x0000110, N_INV, 0x0800030), \
13911 X(vbic, 0x0100110, N_INV, 0x0800030), \
13912 X(veor, 0x1000110, N_INV, N_INV), \
13913 X(vorn, 0x0300110, N_INV, 0x0800010), \
13914 X(vorr, 0x0200110, N_INV, 0x0800010), \
13915 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
13916 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
13917 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
13918 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
13919 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
13920 X(vst1, 0x0000000, 0x0800000, N_INV), \
13921 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
13922 X(vst2, 0x0000100, 0x0800100, N_INV), \
13923 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
13924 X(vst3, 0x0000200, 0x0800200, N_INV), \
13925 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
13926 X(vst4, 0x0000300, 0x0800300, N_INV), \
13927 X(vmovn, 0x1b20200, N_INV, N_INV), \
13928 X(vtrn, 0x1b20080, N_INV, N_INV), \
13929 X(vqmovn, 0x1b20200, N_INV, N_INV), \
13930 X(vqmovun, 0x1b20240, N_INV, N_INV), \
13931 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
13932 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
13933 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
13934 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
13935 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
13936 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
13937 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
13938 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
13939 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
13940 X(vseleq, 0xe000a00, N_INV, N_INV), \
13941 X(vselvs, 0xe100a00, N_INV, N_INV), \
13942 X(vselge, 0xe200a00, N_INV, N_INV), \
13943 X(vselgt, 0xe300a00, N_INV, N_INV), \
13944 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
13945 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
13946 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
13947 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
13948 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
13949 X(aes, 0x3b00300, N_INV, N_INV), \
13950 X(sha3op, 0x2000c00, N_INV, N_INV), \
13951 X(sha1h, 0x3b902c0, N_INV, N_INV), \
13952 X(sha2op, 0x3ba0380, N_INV, N_INV)
13953
13954 enum neon_opc
13955 {
13956 #define X(OPC,I,F,S) N_MNEM_##OPC
13957 NEON_ENC_TAB
13958 #undef X
13959 };
13960
13961 static const struct neon_tab_entry neon_enc_tab[] =
13962 {
13963 #define X(OPC,I,F,S) { (I), (F), (S) }
13964 NEON_ENC_TAB
13965 #undef X
13966 };
13967
13968 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
13969 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13970 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13971 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13972 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13973 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13974 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13975 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13976 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13977 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13978 #define NEON_ENC_SINGLE_(X) \
13979 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
13980 #define NEON_ENC_DOUBLE_(X) \
13981 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
13982 #define NEON_ENC_FPV8_(X) \
13983 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13984
13985 #define NEON_ENCODE(type, inst) \
13986 do \
13987 { \
13988 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
13989 inst.is_neon = 1; \
13990 } \
13991 while (0)
13992
13993 #define check_neon_suffixes \
13994 do \
13995 { \
13996 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
13997 { \
13998 as_bad (_("invalid neon suffix for non neon instruction")); \
13999 return; \
14000 } \
14001 } \
14002 while (0)
14003
14004 /* Define shapes for instruction operands. The following mnemonic characters
14005 are used in this table:
14006
14007 F - VFP S<n> register
14008 D - Neon D<n> register
14009 Q - Neon Q<n> register
14010 I - Immediate
14011 S - Scalar
14012 R - ARM register
14013 L - D<n> register list
14014
14015 This table is used to generate various data:
14016 - enumerations of the form NS_DDR to be used as arguments to
14017 neon_select_shape.
14018 - a table classifying shapes into single, double, quad, mixed.
14019 - a table used to drive neon_select_shape. */
14020
14021 #define NEON_SHAPE_DEF \
14022 X(3, (R, Q, Q), QUAD), \
14023 X(3, (D, D, D), DOUBLE), \
14024 X(3, (Q, Q, Q), QUAD), \
14025 X(3, (D, D, I), DOUBLE), \
14026 X(3, (Q, Q, I), QUAD), \
14027 X(3, (D, D, S), DOUBLE), \
14028 X(3, (Q, Q, S), QUAD), \
14029 X(3, (Q, Q, R), QUAD), \
14030 X(2, (D, D), DOUBLE), \
14031 X(2, (Q, Q), QUAD), \
14032 X(2, (D, S), DOUBLE), \
14033 X(2, (Q, S), QUAD), \
14034 X(2, (D, R), DOUBLE), \
14035 X(2, (Q, R), QUAD), \
14036 X(2, (D, I), DOUBLE), \
14037 X(2, (Q, I), QUAD), \
14038 X(3, (D, L, D), DOUBLE), \
14039 X(2, (D, Q), MIXED), \
14040 X(2, (Q, D), MIXED), \
14041 X(3, (D, Q, I), MIXED), \
14042 X(3, (Q, D, I), MIXED), \
14043 X(3, (Q, D, D), MIXED), \
14044 X(3, (D, Q, Q), MIXED), \
14045 X(3, (Q, Q, D), MIXED), \
14046 X(3, (Q, D, S), MIXED), \
14047 X(3, (D, Q, S), MIXED), \
14048 X(4, (D, D, D, I), DOUBLE), \
14049 X(4, (Q, Q, Q, I), QUAD), \
14050 X(4, (D, D, S, I), DOUBLE), \
14051 X(4, (Q, Q, S, I), QUAD), \
14052 X(2, (F, F), SINGLE), \
14053 X(3, (F, F, F), SINGLE), \
14054 X(2, (F, I), SINGLE), \
14055 X(2, (F, D), MIXED), \
14056 X(2, (D, F), MIXED), \
14057 X(3, (F, F, I), MIXED), \
14058 X(4, (R, R, F, F), SINGLE), \
14059 X(4, (F, F, R, R), SINGLE), \
14060 X(3, (D, R, R), DOUBLE), \
14061 X(3, (R, R, D), DOUBLE), \
14062 X(2, (S, R), SINGLE), \
14063 X(2, (R, S), SINGLE), \
14064 X(2, (F, R), SINGLE), \
14065 X(2, (R, F), SINGLE), \
14066 /* Half float shape supported so far. */\
14067 X (2, (H, D), MIXED), \
14068 X (2, (D, H), MIXED), \
14069 X (2, (H, F), MIXED), \
14070 X (2, (F, H), MIXED), \
14071 X (2, (H, H), HALF), \
14072 X (2, (H, R), HALF), \
14073 X (2, (R, H), HALF), \
14074 X (2, (H, I), HALF), \
14075 X (3, (H, H, H), HALF), \
14076 X (3, (H, F, I), MIXED), \
14077 X (3, (F, H, I), MIXED), \
14078 X (3, (D, H, H), MIXED), \
14079 X (3, (D, H, S), MIXED)
14080
14081 #define S2(A,B) NS_##A##B
14082 #define S3(A,B,C) NS_##A##B##C
14083 #define S4(A,B,C,D) NS_##A##B##C##D
14084
14085 #define X(N, L, C) S##N L
14086
14087 enum neon_shape
14088 {
14089 NEON_SHAPE_DEF,
14090 NS_NULL
14091 };
14092
14093 #undef X
14094 #undef S2
14095 #undef S3
14096 #undef S4
14097
14098 enum neon_shape_class
14099 {
14100 SC_HALF,
14101 SC_SINGLE,
14102 SC_DOUBLE,
14103 SC_QUAD,
14104 SC_MIXED
14105 };
14106
14107 #define X(N, L, C) SC_##C
14108
14109 static enum neon_shape_class neon_shape_class[] =
14110 {
14111 NEON_SHAPE_DEF
14112 };
14113
14114 #undef X
14115
14116 enum neon_shape_el
14117 {
14118 SE_H,
14119 SE_F,
14120 SE_D,
14121 SE_Q,
14122 SE_I,
14123 SE_S,
14124 SE_R,
14125 SE_L
14126 };
14127
14128 /* Register widths of above. */
14129 static unsigned neon_shape_el_size[] =
14130 {
14131 16,
14132 32,
14133 64,
14134 128,
14135 0,
14136 32,
14137 32,
14138 0
14139 };
14140
14141 struct neon_shape_info
14142 {
14143 unsigned els;
14144 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
14145 };
14146
14147 #define S2(A,B) { SE_##A, SE_##B }
14148 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
14149 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
14150
14151 #define X(N, L, C) { N, S##N L }
14152
14153 static struct neon_shape_info neon_shape_tab[] =
14154 {
14155 NEON_SHAPE_DEF
14156 };
14157
14158 #undef X
14159 #undef S2
14160 #undef S3
14161 #undef S4
14162
14163 /* Bit masks used in type checking given instructions.
14164 'N_EQK' means the type must be the same as (or based on in some way) the key
14165 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
14166 set, various other bits can be set as well in order to modify the meaning of
14167 the type constraint. */
14168
14169 enum neon_type_mask
14170 {
14171 N_S8 = 0x0000001,
14172 N_S16 = 0x0000002,
14173 N_S32 = 0x0000004,
14174 N_S64 = 0x0000008,
14175 N_U8 = 0x0000010,
14176 N_U16 = 0x0000020,
14177 N_U32 = 0x0000040,
14178 N_U64 = 0x0000080,
14179 N_I8 = 0x0000100,
14180 N_I16 = 0x0000200,
14181 N_I32 = 0x0000400,
14182 N_I64 = 0x0000800,
14183 N_8 = 0x0001000,
14184 N_16 = 0x0002000,
14185 N_32 = 0x0004000,
14186 N_64 = 0x0008000,
14187 N_P8 = 0x0010000,
14188 N_P16 = 0x0020000,
14189 N_F16 = 0x0040000,
14190 N_F32 = 0x0080000,
14191 N_F64 = 0x0100000,
14192 N_P64 = 0x0200000,
14193 N_KEY = 0x1000000, /* Key element (main type specifier). */
14194 N_EQK = 0x2000000, /* Given operand has the same type & size as the key. */
14195 N_VFP = 0x4000000, /* VFP mode: operand size must match register width. */
14196 N_UNT = 0x8000000, /* Must be explicitly untyped. */
14197 N_DBL = 0x0000001, /* If N_EQK, this operand is twice the size. */
14198 N_HLF = 0x0000002, /* If N_EQK, this operand is half the size. */
14199 N_SGN = 0x0000004, /* If N_EQK, this operand is forced to be signed. */
14200 N_UNS = 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
14201 N_INT = 0x0000010, /* If N_EQK, this operand is forced to be integer. */
14202 N_FLT = 0x0000020, /* If N_EQK, this operand is forced to be float. */
14203 N_SIZ = 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
14204 N_UTYP = 0,
14205 N_MAX_NONSPECIAL = N_P64
14206 };
14207
14208 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
14209
14210 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
14211 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
14212 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
14213 #define N_S_32 (N_S8 | N_S16 | N_S32)
14214 #define N_F_16_32 (N_F16 | N_F32)
14215 #define N_SUF_32 (N_SU_32 | N_F_16_32)
14216 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
14217 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
14218 #define N_F_ALL (N_F16 | N_F32 | N_F64)
14219 #define N_I_MVE (N_I8 | N_I16 | N_I32)
14220 #define N_F_MVE (N_F16 | N_F32)
14221 #define N_SU_MVE (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
14222
14223 /* Pass this as the first type argument to neon_check_type to ignore types
14224 altogether. */
14225 #define N_IGNORE_TYPE (N_KEY | N_EQK)
14226
14227 /* Select a "shape" for the current instruction (describing register types or
14228 sizes) from a list of alternatives. Return NS_NULL if the current instruction
14229 doesn't fit. For non-polymorphic shapes, checking is usually done as a
14230 function of operand parsing, so this function doesn't need to be called.
14231 Shapes should be listed in order of decreasing length. */
14232
static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Walk the NS_NULL-terminated candidate list; the first shape whose
     every element matches the corresponding parsed operand wins.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      /* VFP single register used as a half-precision value: the
		 operand must be a single-precision register carrying a
		 16-bit type, however that type was supplied.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      /* VFP single-precision register: 32-bit type, or no type
		 given at all.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      /* Neon D register.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      /* Plain ARM core register.  */
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      /* Neon Q register.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      /* Immediate.  */
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      /* Scalar (element of a vector register).  */
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register list: accepted unconditionally here.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
14369
14370 /* True if SHAPE is predominantly a quadword operation (most of the time, this
14371 means the Q bit should be set). */
14372
static int
neon_quad (enum neon_shape shape)
{
  /* Shape classes are precomputed in neon_shape_class[], indexed by
     the enum value.  */
  return neon_shape_class[shape] == SC_QUAD;
}
14378
14379 static void
14380 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
14381 unsigned *g_size)
14382 {
14383 /* Allow modification to be made to types which are constrained to be
14384 based on the key element, based on bits set alongside N_EQK. */
14385 if ((typebits & N_EQK) != 0)
14386 {
14387 if ((typebits & N_HLF) != 0)
14388 *g_size /= 2;
14389 else if ((typebits & N_DBL) != 0)
14390 *g_size *= 2;
14391 if ((typebits & N_SGN) != 0)
14392 *g_type = NT_signed;
14393 else if ((typebits & N_UNS) != 0)
14394 *g_type = NT_unsigned;
14395 else if ((typebits & N_INT) != 0)
14396 *g_type = NT_integer;
14397 else if ((typebits & N_FLT) != 0)
14398 *g_type = NT_float;
14399 else if ((typebits & N_SIZ) != 0)
14400 *g_type = NT_untyped;
14401 }
14402 }
14403
14404 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
14405 operand type, i.e. the single type specified in a Neon instruction when it
14406 is the only one given. */
14407
14408 static struct neon_type_el
14409 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
14410 {
14411 struct neon_type_el dest = *key;
14412
14413 gas_assert ((thisarg & N_EQK) != 0);
14414
14415 neon_modify_type_size (thisarg, &dest.type, &dest.size);
14416
14417 return dest;
14418 }
14419
14420 /* Convert Neon type and size into compact bitmask representation. */
14421
static enum neon_type_mask
type_chk_of_el_type (enum neon_el_type type, unsigned size)
{
  /* Map an explicit (class, size-in-bits) pair onto the corresponding
     single-bit mask value; N_UTYP if the combination is not
     representable.  */
  switch (type)
    {
    case NT_untyped:
      switch (size)
	{
	case 8:  return N_8;
	case 16: return N_16;
	case 32: return N_32;
	case 64: return N_64;
	default: ;
	}
      break;

    case NT_integer:
      switch (size)
	{
	case 8:  return N_I8;
	case 16: return N_I16;
	case 32: return N_I32;
	case 64: return N_I64;
	default: ;
	}
      break;

    case NT_float:
      /* Note: there is no 8-bit float type.  */
      switch (size)
	{
	case 16: return N_F16;
	case 32: return N_F32;
	case 64: return N_F64;
	default: ;
	}
      break;

    case NT_poly:
      /* Note: there is no 32-bit polynomial type.  */
      switch (size)
	{
	case 8:  return N_P8;
	case 16: return N_P16;
	case 64: return N_P64;
	default: ;
	}
      break;

    case NT_signed:
      switch (size)
	{
	case 8:  return N_S8;
	case 16: return N_S16;
	case 32: return N_S32;
	case 64: return N_S64;
	default: ;
	}
      break;

    case NT_unsigned:
      switch (size)
	{
	case 8:  return N_U8;
	case 16: return N_U16;
	case 32: return N_U32;
	case 64: return N_U64;
	default: ;
	}
      break;

    default: ;
    }

  /* Unrepresentable combination.  */
  return N_UTYP;
}
14496
14497 /* Convert compact Neon bitmask type representation to a type and size. Only
14498 handles the case where a single bit is set in the mask. */
14499
static int
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
		     enum neon_type_mask mask)
{
  /* N_EQK is a constraint marker, not a real type: reject it here.  */
  if ((mask & N_EQK) != 0)
    return FAIL;

  /* Recover the element size from whichever size group the bit falls
     into.  */
  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
    *size = 8;
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
    *size = 16;
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
    *size = 32;
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
    *size = 64;
  else
    return FAIL;

  /* Recover the element class (signed/unsigned/integer/...).  */
  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
    *type = NT_signed;
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
    *type = NT_unsigned;
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
    *type = NT_integer;
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
    *type = NT_untyped;
  else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
    *type = NT_poly;
  else if ((mask & (N_F_ALL)) != 0)
    *type = NT_float;
  else
    return FAIL;

  return SUCCESS;
}
14535
14536 /* Modify a bitmask of allowed types. This is only needed for type
14537 relaxation. */
14538
14539 static unsigned
14540 modify_types_allowed (unsigned allowed, unsigned mods)
14541 {
14542 unsigned size;
14543 enum neon_el_type type;
14544 unsigned destmask;
14545 int i;
14546
14547 destmask = 0;
14548
14549 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
14550 {
14551 if (el_type_of_type_chk (&type, &size,
14552 (enum neon_type_mask) (allowed & i)) == SUCCESS)
14553 {
14554 neon_modify_type_size (mods, &type, &size);
14555 destmask |= type_chk_of_el_type (type, size);
14556 }
14557 }
14558
14559 return destmask;
14560 }
14561
14562 /* Check type and return type classification.
14563 The manual states (paraphrase): If one datatype is given, it indicates the
14564 type given in:
14565 - the second operand, if there is one
14566 - the operand, if there is no second operand
14567 - the result, if there are no operands.
14568 This isn't quite good enough though, so we use a concept of a "key" datatype
14569 which is set on a per-instruction basis, which is the one which matters when
14570 only one data type is written.
14571 Note: this function has side-effects (e.g. filling in missing operands). All
14572 Neon instructions should call it before performing bit encoding. */
14573
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  /* Validate the ELS operand types of the current instruction against
     the per-operand constraint masks passed as varargs, filling in any
     operand types that were omitted from the source.  Returns the key
     element's type on success, or {NT_invtype, -1} on failure.  */
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      /* N_IGNORE_TYPE means the caller does type checking itself.  */
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* Types may come from the mnemonic suffix or from the operands, but
     not both.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 establishes the key type and size; pass 1 checks every
     operand against the (possibly modified) key.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (BAD_SIMD_TYPE);
		      return badtype;
		    }
		}
	      else
		{
		  /* An N_EQK operand must agree with the (modified) key
		     operand's type and size.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
14771
14772 /* Neon-style VFP instruction forwarding. */
14773
14774 /* Thumb VFP instructions have 0xE in the condition field. */
14775
14776 static void
14777 do_vfp_cond_or_thumb (void)
14778 {
14779 inst.is_neon = 1;
14780
14781 if (thumb_mode)
14782 inst.instruction |= 0xe0000000;
14783 else
14784 inst.instruction |= inst.cond << 28;
14785 }
14786
14787 /* Look up and encode a simple mnemonic, for use as a helper function for the
14788 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14789 etc. It is assumed that operand parsing has already been done, and that the
14790 operands are in the form expected by the given opcode (this isn't necessarily
14791 the same as the form in which they were parsed, hence some massaging must
14792 take place before this function is called).
14793 Checks current arch version against that in the looked-up opcode. */
14794
14795 static void
14796 do_vfp_nsyn_opcode (const char *opname)
14797 {
14798 const struct asm_opcode *opcode;
14799
14800 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
14801
14802 if (!opcode)
14803 abort ();
14804
14805 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
14806 thumb_mode ? *opcode->tvariant : *opcode->avariant),
14807 _(BAD_FPU));
14808
14809 inst.is_neon = 1;
14810
14811 if (thumb_mode)
14812 {
14813 inst.instruction = opcode->tvalue;
14814 opcode->tencode ();
14815 }
14816 else
14817 {
14818 inst.instruction = (inst.cond << 28) | opcode->avalue;
14819 opcode->aencode ();
14820 }
14821 }
14822
14823 static void
14824 do_vfp_nsyn_add_sub (enum neon_shape rs)
14825 {
14826 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14827
14828 if (rs == NS_FFF || rs == NS_HHH)
14829 {
14830 if (is_add)
14831 do_vfp_nsyn_opcode ("fadds");
14832 else
14833 do_vfp_nsyn_opcode ("fsubs");
14834
14835 /* ARMv8.2 fp16 instruction. */
14836 if (rs == NS_HHH)
14837 do_scalar_fp16_v82_encode ();
14838 }
14839 else
14840 {
14841 if (is_add)
14842 do_vfp_nsyn_opcode ("faddd");
14843 else
14844 do_vfp_nsyn_opcode ("fsubd");
14845 }
14846 }
14847
14848 /* Check operand types to see if this is a VFP instruction, and if so call
14849 PFN (). */
14850
14851 static int
14852 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
14853 {
14854 enum neon_shape rs;
14855 struct neon_type_el et;
14856
14857 switch (args)
14858 {
14859 case 2:
14860 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14861 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14862 break;
14863
14864 case 3:
14865 rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14866 et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14867 N_F_ALL | N_KEY | N_VFP);
14868 break;
14869
14870 default:
14871 abort ();
14872 }
14873
14874 if (et.type != NT_invtype)
14875 {
14876 pfn (rs);
14877 return SUCCESS;
14878 }
14879
14880 inst.error = NULL;
14881 return FAIL;
14882 }
14883
14884 static void
14885 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14886 {
14887 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14888
14889 if (rs == NS_FFF || rs == NS_HHH)
14890 {
14891 if (is_mla)
14892 do_vfp_nsyn_opcode ("fmacs");
14893 else
14894 do_vfp_nsyn_opcode ("fnmacs");
14895
14896 /* ARMv8.2 fp16 instruction. */
14897 if (rs == NS_HHH)
14898 do_scalar_fp16_v82_encode ();
14899 }
14900 else
14901 {
14902 if (is_mla)
14903 do_vfp_nsyn_opcode ("fmacd");
14904 else
14905 do_vfp_nsyn_opcode ("fnmacd");
14906 }
14907 }
14908
14909 static void
14910 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14911 {
14912 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14913
14914 if (rs == NS_FFF || rs == NS_HHH)
14915 {
14916 if (is_fma)
14917 do_vfp_nsyn_opcode ("ffmas");
14918 else
14919 do_vfp_nsyn_opcode ("ffnmas");
14920
14921 /* ARMv8.2 fp16 instruction. */
14922 if (rs == NS_HHH)
14923 do_scalar_fp16_v82_encode ();
14924 }
14925 else
14926 {
14927 if (is_fma)
14928 do_vfp_nsyn_opcode ("ffmad");
14929 else
14930 do_vfp_nsyn_opcode ("ffnmad");
14931 }
14932 }
14933
14934 static void
14935 do_vfp_nsyn_mul (enum neon_shape rs)
14936 {
14937 if (rs == NS_FFF || rs == NS_HHH)
14938 {
14939 do_vfp_nsyn_opcode ("fmuls");
14940
14941 /* ARMv8.2 fp16 instruction. */
14942 if (rs == NS_HHH)
14943 do_scalar_fp16_v82_encode ();
14944 }
14945 else
14946 do_vfp_nsyn_opcode ("fmuld");
14947 }
14948
14949 static void
14950 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14951 {
14952 int is_neg = (inst.instruction & 0x80) != 0;
14953 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
14954
14955 if (rs == NS_FF || rs == NS_HH)
14956 {
14957 if (is_neg)
14958 do_vfp_nsyn_opcode ("fnegs");
14959 else
14960 do_vfp_nsyn_opcode ("fabss");
14961
14962 /* ARMv8.2 fp16 instruction. */
14963 if (rs == NS_HH)
14964 do_scalar_fp16_v82_encode ();
14965 }
14966 else
14967 {
14968 if (is_neg)
14969 do_vfp_nsyn_opcode ("fnegd");
14970 else
14971 do_vfp_nsyn_opcode ("fabsd");
14972 }
14973 }
14974
14975 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14976 insns belong to Neon, and are handled elsewhere. */
14977
14978 static void
14979 do_vfp_nsyn_ldm_stm (int is_dbmode)
14980 {
14981 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14982 if (is_ldm)
14983 {
14984 if (is_dbmode)
14985 do_vfp_nsyn_opcode ("fldmdbs");
14986 else
14987 do_vfp_nsyn_opcode ("fldmias");
14988 }
14989 else
14990 {
14991 if (is_dbmode)
14992 do_vfp_nsyn_opcode ("fstmdbs");
14993 else
14994 do_vfp_nsyn_opcode ("fstmias");
14995 }
14996 }
14997
14998 static void
14999 do_vfp_nsyn_sqrt (void)
15000 {
15001 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
15002 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
15003
15004 if (rs == NS_FF || rs == NS_HH)
15005 {
15006 do_vfp_nsyn_opcode ("fsqrts");
15007
15008 /* ARMv8.2 fp16 instruction. */
15009 if (rs == NS_HH)
15010 do_scalar_fp16_v82_encode ();
15011 }
15012 else
15013 do_vfp_nsyn_opcode ("fsqrtd");
15014 }
15015
15016 static void
15017 do_vfp_nsyn_div (void)
15018 {
15019 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15020 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15021 N_F_ALL | N_KEY | N_VFP);
15022
15023 if (rs == NS_FFF || rs == NS_HHH)
15024 {
15025 do_vfp_nsyn_opcode ("fdivs");
15026
15027 /* ARMv8.2 fp16 instruction. */
15028 if (rs == NS_HHH)
15029 do_scalar_fp16_v82_encode ();
15030 }
15031 else
15032 do_vfp_nsyn_opcode ("fdivd");
15033 }
15034
15035 static void
15036 do_vfp_nsyn_nmul (void)
15037 {
15038 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15039 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15040 N_F_ALL | N_KEY | N_VFP);
15041
15042 if (rs == NS_FFF || rs == NS_HHH)
15043 {
15044 NEON_ENCODE (SINGLE, inst);
15045 do_vfp_sp_dyadic ();
15046
15047 /* ARMv8.2 fp16 instruction. */
15048 if (rs == NS_HHH)
15049 do_scalar_fp16_v82_encode ();
15050 }
15051 else
15052 {
15053 NEON_ENCODE (DOUBLE, inst);
15054 do_vfp_dp_rd_rn_rm ();
15055 }
15056 do_vfp_cond_or_thumb ();
15057
15058 }
15059
static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (inst.operands[1].isreg)
    {
      /* Register-register compare: vcmp{e} Sd/Dd, Sm/Dm.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare against an immediate (#0.0): rewrite the mnemonic index
	 to the corresponding vcmp{e}z pseudo-opcode.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
15114
15115 static void
15116 nsyn_insert_sp (void)
15117 {
15118 inst.operands[1] = inst.operands[0];
15119 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
15120 inst.operands[0].reg = REG_SP;
15121 inst.operands[0].isreg = 1;
15122 inst.operands[0].writeback = 1;
15123 inst.operands[0].present = 1;
15124 }
15125
15126 static void
15127 do_vfp_nsyn_push (void)
15128 {
15129 nsyn_insert_sp ();
15130
15131 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
15132 _("register list must contain at least 1 and at most 16 "
15133 "registers"));
15134
15135 if (inst.operands[1].issingle)
15136 do_vfp_nsyn_opcode ("fstmdbs");
15137 else
15138 do_vfp_nsyn_opcode ("fstmdbd");
15139 }
15140
15141 static void
15142 do_vfp_nsyn_pop (void)
15143 {
15144 nsyn_insert_sp ();
15145
15146 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
15147 _("register list must contain at least 1 and at most 16 "
15148 "registers"));
15149
15150 if (inst.operands[1].issingle)
15151 do_vfp_nsyn_opcode ("fldmias");
15152 else
15153 do_vfp_nsyn_opcode ("fldmiad");
15154 }
15155
15156 /* Fix up Neon data-processing instructions, ORing in the correct bits for
15157 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
15158
15159 static void
15160 neon_dp_fixup (struct arm_it* insn)
15161 {
15162 unsigned int i = insn->instruction;
15163 insn->is_neon = 1;
15164
15165 if (thumb_mode)
15166 {
15167 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
15168 if (i & (1 << 24))
15169 i |= 1 << 28;
15170
15171 i &= ~(1 << 24);
15172
15173 i |= 0xef000000;
15174 }
15175 else
15176 i |= 0xf2000000;
15177
15178 insn->instruction = i;
15179 }
15180
15181 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
15182 (0, 1, 2, 3). */
15183
static unsigned
neon_logbits (unsigned x)
{
  /* Equivalent to ffs (x) - 4: compute the 1-based index of the least
     significant set bit (0 for x == 0), then subtract 4 so that sizes
     8, 16, 32 and 64 map to 0, 1, 2 and 3.  */
  unsigned lsb_index = 0;

  if (x != 0)
    {
      lsb_index = 1;
      while ((x & 1) == 0)
	{
	  x >>= 1;
	  lsb_index++;
	}
    }

  return lsb_index - 4;
}
15189
/* Low four bits of a vector register number (the Vd/Vn/Vm field).  */
#define LOW4(R) ((R) & 0xf)
/* Bit 4 of a vector register number (the D/N/M high bit).  */
#define HI1(R) (((R) >> 4) & 1)
15192
static void
mve_encode_qqr (int size, int fp)
{
  /* Encode an MVE vector-by-scalar (Qd, Qn, Rm) operation, rewriting
     the Neon opcode chosen at parse time into the corresponding MVE
     scalar encoding.  SIZE is the element size in bits; FP selects the
     floating-point variants.  */

  /* SP/PC as the scalar operand are only warnings (as_tsktsk), not hard
     errors.  */
  if (inst.operands[2].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[2].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  if (fp)
    {
      /* The constants compared against are the Neon opcode values set up
	 by the opcode table for these mnemonics.  */
      /* vadd.  */
      if (((unsigned)inst.instruction) == 0xd00)
	inst.instruction = 0xee300f40;
      /* vsub.  */
      else if (((unsigned)inst.instruction) == 0x200d00)
	inst.instruction = 0xee301f40;

      /* Setting size which is 1 for F16 and 0 for F32.  */
      inst.instruction |= (size == 16) << 28;
    }
  else
    {
      /* vadd.  */
      if (((unsigned)inst.instruction) == 0x800)
	inst.instruction = 0xee010f40;
      /* vsub.  */
      else if (((unsigned)inst.instruction) == 0x1000800)
	inst.instruction = 0xee011f40;
      /* Setting bits for size.  */
      inst.instruction |= neon_logbits (size) << 20;
    }
  /* Qd into bits 12-15/22, Qn into bits 16-19/7, Rm into bits 0-3.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= inst.operands[2].reg;
  inst.is_neon = 1;
}
15231
static void
mve_encode_rqq (unsigned bit28, unsigned size)
{
  /* Encode an MVE instruction with a core-register destination
     (operand 0, placed unshifted in bits 12-15) and two vector sources
     (operands 1 and 2).  BIT28 selects between the instruction's two
     encoding variants; SIZE is the element size in bits.  */
  inst.instruction |= bit28 << 28;
  inst.instruction |= neon_logbits (size) << 20;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.is_neon = 1;
}
15244
static void
mve_encode_qqq (int ubit, int size)
{
  /* Encode an MVE three-vector (Qd, Qn, Qm) operation.  UBIT sets bit
     28 (typically the unsigned/variant bit); SIZE is the element size
     in bits.  */

  inst.instruction |= (ubit != 0) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_logbits (size) << 20;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[2].reg);

  inst.is_neon = 1;
}
15260
15261
15262 /* Encode insns with bit pattern:
15263
15264 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15265 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
15266
15267 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
15268 different meaning for some instruction. */
15269
static void
neon_three_same (int isquad, int ubit, int size)
{
  /* Encode a Neon "three registers, same length" operation: Rd in bits
     12-15/22, Rn in 16-19/7, Rm in 0-3/5, plus the Q and U bits and
     (optionally) the size field.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  /* SIZE == -1 means the size field is left alone (it may mean something
     else for this instruction).  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
15286
15287 /* Encode instructions of the form:
15288
15289 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
15290 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
15291
15292 Don't write size if SIZE == -1. */
15293
static void
neon_two_same (int qbit, int ubit, int size)
{
  /* Encode a Neon two-register (Rd, Rm) operation, setting the Q and U
     bits and, unless SIZE == -1, the size field at bits 18-19.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  neon_dp_fixup (&inst);
}
15309
15310 /* Neon instruction encoders, in approximate order of appearance. */
15311
15312 static void
15313 do_neon_dyadic_i_su (void)
15314 {
15315 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15316 struct neon_type_el et = neon_check_type (3, rs,
15317 N_EQK, N_EQK, N_SU_32 | N_KEY);
15318 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15319 }
15320
15321 static void
15322 do_neon_dyadic_i64_su (void)
15323 {
15324 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15325 struct neon_type_el et = neon_check_type (3, rs,
15326 N_EQK, N_EQK, N_SU_ALL | N_KEY);
15327 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15328 }
15329
static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  /* Encode a Neon shift-by-immediate: registers, Q bit, the immediate
     at bits 16+, and the size-derived L/imm-high bits.  The U bit is
     only written when WRITE_UBIT is set (some shifts have no U bit).  */
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  /* SIZE is the element size in bytes: its bit 3 goes to bit 7 (set only
     for 64-bit elements), the low three bits to bits 19-21.  */
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
15348
static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      /* Immediate form: vshl.<type> Dd, Dm, #imm.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      int imm = inst.operands[2].imm;

      /* A left shift amount must be 0 .. elsize-1.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      /* Register form.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
15384
static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      /* Immediate form: vqshl.<type> Dd, Dm, #imm.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      /* Register form.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
15414
15415 static void
15416 do_neon_rshl (void)
15417 {
15418 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15419 struct neon_type_el et = neon_check_type (3, rs,
15420 N_EQK, N_EQK, N_SU_ALL | N_KEY);
15421 unsigned int tmp;
15422
15423 tmp = inst.operands[2].reg;
15424 inst.operands[2].reg = inst.operands[1].reg;
15425 inst.operands[1].reg = tmp;
15426 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15427 }
15428
static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Find the "cmode" encoding for a VBIC/VORR-style logic immediate:
     the value must be a single byte, optionally shifted within a 16- or
     32-bit element.  On success the byte goes in *IMMBITS and the cmode
     is returned; otherwise FAIL.  */

  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* Try each byte position of a 32-bit element.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Fall back to a 16-bit element if the 32-bit value is the same
	 16-bit pattern repeated.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit element: byte in either half.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
15484
static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      /* Three-register form (vand/vorr/veor/... Dd, Dn, Dm): bitwise
	 ops are untyped, so any type suffix is accepted.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form: either "vop Dd, #imm" or "vop Dd, Dd, #imm".  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
15570
15571 static void
15572 do_neon_bitfield (void)
15573 {
15574 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15575 neon_check_type (3, rs, N_IGNORE_TYPE);
15576 neon_three_same (neon_quad (rs), 0, -1);
15577 }
15578
15579 static void
15580 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
15581 unsigned destbits)
15582 {
15583 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
15584 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
15585 types | N_KEY);
15586 if (et.type == NT_float)
15587 {
15588 NEON_ENCODE (FLOAT, inst);
15589 if (rs == NS_QQR)
15590 mve_encode_qqr (et.size, 1);
15591 else
15592 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
15593 }
15594 else
15595 {
15596 NEON_ENCODE (INTEGER, inst);
15597 if (rs == NS_QQR)
15598 mve_encode_qqr (et.size, 0);
15599 else
15600 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
15601 }
15602 }
15603
15604
15605 static void
15606 do_neon_dyadic_if_su_d (void)
15607 {
15608 /* This version only allow D registers, but that constraint is enforced during
15609 operand parsing so we don't need to do anything extra here. */
15610 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
15611 }
15612
15613 static void
15614 do_neon_dyadic_if_i_d (void)
15615 {
15616 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15617 affected if we specify unsigned args. */
15618 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
15619 }
15620
/* Flags for vfp_or_neon_is_neon, selecting which checks it performs.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	 /* Reject/adjust the condition field.  */
  NEON_CHECK_ARCH = 2,	 /* Require the Neon v1 extension.  */
  NEON_CHECK_ARCH8 = 4	 /* Require the ARMv8 Neon extension.  */
};
15627
15628 /* Call this function if an instruction which may have belonged to the VFP or
15629 Neon instruction sets, but turned out to be a Neon instruction (due to the
15630 operand types involved, etc.). We have to check and/or fix-up a couple of
15631 things:
15632
15633 - Make sure the user hasn't attempted to make a Neon instruction
15634 conditional.
15635 - Alter the value in the condition code field if necessary.
15636 - Make sure that the arch supports Neon instructions.
15637
15638 Which of these operations take place depends on bits from enum
15639 vfp_or_neon_is_neon_bits.
15640
15641 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
15642 current instruction's condition is COND_ALWAYS, the condition field is
15643 changed to inst.uncond_value. This is necessary because instructions shared
15644 between VFP and Neon may be conditional for the VFP variants only, and the
15645 unconditional Neon version must have, e.g., 0xF in the condition field. */
15646
15647 static int
15648 vfp_or_neon_is_neon (unsigned check)
15649 {
15650 /* Conditions are always legal in Thumb mode (IT blocks). */
15651 if (!thumb_mode && (check & NEON_CHECK_CC))
15652 {
15653 if (inst.cond != COND_ALWAYS)
15654 {
15655 first_error (_(BAD_COND));
15656 return FAIL;
15657 }
15658 if (inst.uncond_value != -1)
15659 inst.instruction |= inst.uncond_value << 28;
15660 }
15661
15662
15663 if (((check & NEON_CHECK_ARCH) && !mark_feature_used (&fpu_neon_ext_v1))
15664 || ((check & NEON_CHECK_ARCH8)
15665 && !mark_feature_used (&fpu_neon_ext_armv8)))
15666 {
15667 first_error (_(BAD_FPU));
15668 return FAIL;
15669 }
15670
15671 return SUCCESS;
15672 }
15673
/* Decide whether the current SIMD instruction is allowed under its
   predication state and set inst.pred_insn_type accordingly.  FP is
   non-zero when a floating-point variant is being assembled (needs the
   MVE FP extension); CHECK is a mask of NEON_CHECK_* bits forwarded to
   vfp_or_neon_is_neon.  Returns 0 on success, or a non-zero code
   (1, 2 or 3, identifying which path failed) on error.  */
static int
check_simd_pred_availability (int fp, unsigned check)
{
  if (inst.cond > COND_ALWAYS)
    {
      /* Inside a VPT block: only valid on MVE targets.  */
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  inst.error = BAD_FPU;
	  return 1;
	}
      inst.pred_insn_type = INSIDE_VPT_INSN;
    }
  else if (inst.cond < COND_ALWAYS)
    {
      /* Conditional (IT block): fine for MVE; otherwise it must be a
	 valid Neon instruction.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
      else if (vfp_or_neon_is_neon (check) == FAIL)
	return 2;
    }
  else
    {
      /* Unconditional: need the right MVE extension, or else Neon.  */
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, fp ? mve_fp_ext : mve_ext)
	  && vfp_or_neon_is_neon (check) == FAIL)
	return 3;

      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
    }
  return 0;
}
15704
15705 static void
15706 do_neon_dyadic_if_su (void)
15707 {
15708 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
15709 struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
15710 N_SUF_32 | N_KEY);
15711
15712 if (check_simd_pred_availability (et.type == NT_float,
15713 NEON_CHECK_ARCH | NEON_CHECK_CC))
15714 return;
15715
15716 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
15717 }
15718
/* Encode VADD/VSUB.  Prefer the VFP scalar encoding where applicable;
   otherwise choose between the MVE and Neon vector encodings based on
   register shape, element size and predication state.  */
static void
do_neon_addsub_if_i (void)
{
  if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
      && try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK,
					    N_EQK, N_IF_32 | N_I64 | N_KEY);

  /* The vector + scalar (QQR) form has no 64-bit element variant.  */
  constraint (rs == NS_QQR && et.size == 64, BAD_FPU);
  /* If we are parsing Q registers and the element types match MVE, which NEON
     also supports, then we must check whether this is an instruction that can
     be used by both MVE/NEON.  This distinction can be made based on whether
     they are predicated or not.  */
  if ((rs == NS_QQQ || rs == NS_QQR) && et.size != 64)
    {
      if (check_simd_pred_availability (et.type == NT_float,
					NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
    }
  else
    {
      /* If they are either in a D register or are using an unsupported.  */
      if (rs != NS_QQR
	  && vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
    }

  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
15753
15754 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
15755 result to be:
15756 V<op> A,B (A is operand 0, B is operand 2)
15757 to mean:
15758 V<op> A,B,A
15759 not:
15760 V<op> A,B,B
15761 so handle that case specially. */
15762
15763 static void
15764 neon_exchange_operands (void)
15765 {
15766 if (inst.operands[1].present)
15767 {
15768 void *scratch = xmalloc (sizeof (inst.operands[0]));
15769
15770 /* Swap operands[1] and operands[2]. */
15771 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
15772 inst.operands[1] = inst.operands[2];
15773 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
15774 free (scratch);
15775 }
15776 else
15777 {
15778 inst.operands[1] = inst.operands[2];
15779 inst.operands[2] = inst.operands[0];
15780 }
15781 }
15782
/* Encode a Neon comparison.  REGTYPES are the element types allowed by
   the three-register form; IMMTYPES those allowed by the
   compare-against-zero immediate form.  If INVERT is set, the mnemonic
   is a pseudo-instruction implemented by swapping the source operands
   of the opposite comparison.  */
static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Comparison against #0.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Bit 10 selects the floating-point variant.  */
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
15810
15811 static void
15812 do_neon_cmp (void)
15813 {
15814 neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
15815 }
15816
15817 static void
15818 do_neon_cmp_inv (void)
15819 {
15820 neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
15821 }
15822
15823 static void
15824 do_neon_ceq (void)
15825 {
15826 neon_compare (N_IF_32, N_IF_32, FALSE);
15827 }
15828
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.

   Dot Product instructions are similar to multiply instructions except elsize
   should always be 32.

   This function translates SCALAR, which is GAS's internal encoding of indexed
   scalar register, to raw encoding.  There is also register and index range
   check based on ELSIZE.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  /* Any other element size, or an out-of-range register/index.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
15867
/* Encode multiply / multiply-accumulate scalar instructions.  ET is the
   checked element type; UBIT is placed in bit 24 (callers pass
   neon_quad for this encoding).  */

static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Translate GAS's indexed-scalar operand to the raw M:Rm encoding.  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  /* Bit 8 selects the floating-point variant.  */
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
15892
/* Encode VMLA/VMLS (and, via do_neon_mul, VMUL), whose third operand may
   be either a vector register or an indexed scalar.  */
static void
do_neon_mac_maybe_scalar (void)
{
  /* Prefer the VFP encoding when the operands allow it.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
15917
15918 static void
15919 do_neon_fmac (void)
15920 {
15921 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
15922 return;
15923
15924 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15925 return;
15926
15927 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
15928 }
15929
15930 static void
15931 do_neon_tst (void)
15932 {
15933 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15934 struct neon_type_el et = neon_check_type (3, rs,
15935 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
15936 neon_three_same (neon_quad (rs), 0, et.size);
15937 }
15938
15939 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15940 same types as the MAC equivalents. The polynomial type for this instruction
15941 is encoded the same as the integer type. */
15942
15943 static void
15944 do_neon_mul (void)
15945 {
15946 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
15947 return;
15948
15949 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15950 return;
15951
15952 if (inst.operands[2].isscalar)
15953 do_neon_mac_maybe_scalar ();
15954 else
15955 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
15956 }
15957
/* Encode VQDMULH/VQRDMULH in either the indexed-scalar or the
   three-register form; only signed 16/32-bit element types exist.  */
static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15979
/* Encode VMULL.  The mnemonic is shared between Neon's VMULL and MVE's
   VMULLB/VMULLT, so first decide which variant is meant — based on the
   target features, predication state and operand shape — and fall back
   to the Neon VMUL handler when the Neon instruction was intended.  */
static void
do_mve_vmull (void)
{

  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_DDS,
					  NS_QQS, NS_QQQ, NS_QQR, NS_NULL);
  /* No MVE on this target, unpredicated, and spelt "vmullt": this can
     only sensibly be the Neon instruction.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
      && inst.cond == COND_ALWAYS
      && ((unsigned)inst.instruction) == M_MNEM_vmullt)
    {
      if (rs == NS_QQQ)
	{

	  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
						    N_SUF_32 | N_F64 | N_P8
						    | N_P16 | N_I_MVE | N_KEY);
	  /* NOTE: for Q-register shapes only some type combinations are
	     redirected to Neon VMUL; the rest fall through to the MVE
	     path below, which will then report BAD_FPU.  */
	  if (((et.type == NT_poly) && et.size == 8
	       && ARM_CPU_IS_ANY (cpu_variant))
	      || (et.type == NT_integer) || (et.type == NT_float))
	    goto neon_vmul;
	}
      else
	goto neon_vmul;
    }

  /* From here on we are dealing with MVE's VMULLB/VMULLT.  */
  constraint (rs != NS_QQQ, BAD_FPU);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
					    N_SU_32 | N_P8 | N_P16 | N_KEY);

  /* We are dealing with MVE's vmullt.  */
  if (et.size == 32
      && (inst.operands[0].reg == inst.operands[1].reg
	  || inst.operands[0].reg == inst.operands[2].reg))
    as_tsktsk (BAD_MVE_SRCDEST);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* Polynomial variants reuse the size slot differently: the first
     argument is the log2 size, with 64 in the size argument.  */
  if (et.type == NT_poly)
    mve_encode_qqq (neon_logbits (et.size), 64);
  else
    mve_encode_qqq (et.type == NT_unsigned, et.size);

  return;

neon_vmul:
  /* Redirect to the plain Neon VMUL handler.  */
  inst.instruction = N_MNEM_vmul;
  inst.cond = 0xb;
  if (thumb_mode)
    inst.pred_insn_type = INSIDE_IT_INSN;
  do_neon_mul ();
}
16034
/* Encode MVE VABAV (general-purpose register destination, two MVE
   vector sources).  */
static void
do_mve_vabav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);

  if (rs == NS_NULL)
    return;

  /* Silently give up when MVE is unavailable; the operand parse above
     will already have diagnosed unusable operands.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  struct neon_type_el et = neon_check_type (2, NS_NULL, N_EQK, N_KEY | N_S8
					    | N_S16 | N_S32 | N_U8 | N_U16
					    | N_U32);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rqq (et.type == NT_unsigned, et.size);
}
16057
/* Encode the MVE VMLADAV/VMLSDAV family (multiply-accumulate across
   vector into a general-purpose register).  */
static void
do_mve_vmladav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
					    N_EQK, N_EQK, N_SU_MVE | N_KEY);

  /* The exchanging (x/ax) and subtracting (vmlsdav*) variants only
     accept signed element types.  */
  if (et.type == NT_unsigned
      && (inst.instruction == M_MNEM_vmladavx
	  || inst.instruction == M_MNEM_vmladavax
	  || inst.instruction == M_MNEM_vmlsdav
	  || inst.instruction == M_MNEM_vmlsdava
	  || inst.instruction == M_MNEM_vmlsdavx
	  || inst.instruction == M_MNEM_vmlsdavax))
    first_error (BAD_SIMD_TYPE);

  constraint (inst.operands[2].reg > 14,
	      _("MVE vector register in the range [Q0..Q7] expected"));

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The 8-bit element size lives in a different bit position for the
     VMLSDAV variants than for the VMLADAV ones.  */
  if (inst.instruction == M_MNEM_vmlsdav
      || inst.instruction == M_MNEM_vmlsdava
      || inst.instruction == M_MNEM_vmlsdavx
      || inst.instruction == M_MNEM_vmlsdavax)
    inst.instruction |= (et.size == 8) << 28;
  else
    inst.instruction |= (et.size == 8) << 8;

  mve_encode_rqq (et.type == NT_unsigned, 64);
  inst.instruction |= (et.size == 32) << 16;
}
16093
/* Encode VQRDMLAH/VQRDMLSH, which need the ARMv8.1 AdvSIMD extension, in
   either indexed-scalar or three-register form.  */
static void
do_neon_qrdmlah (void)
{
  /* Check we're on the correct architecture.  */
  if (!mark_feature_used (&fpu_neon_ext_armv8))
    inst.error =
      _("instruction form not available on this architecture.");
  else if (!mark_feature_used (&fpu_neon_ext_v8_1))
    {
      /* v8.0 Neon but not v8.1: accept with a warning and record the
	 implied feature use.  */
      as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
      record_feature_use (&fpu_neon_ext_v8_1);
    }

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
16125
16126 static void
16127 do_neon_fcmp_absolute (void)
16128 {
16129 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
16130 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
16131 N_F_16_32 | N_KEY);
16132 /* Size field comes from bit mask. */
16133 neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
16134 }
16135
/* VACLE/VACLT: pseudo-instructions for VACGE/VACGT with the source
   operands exchanged.  */
static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
16142
16143 static void
16144 do_neon_step (void)
16145 {
16146 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
16147 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
16148 N_F_16_32 | N_KEY);
16149 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
16150 }
16151
/* Encode VABS/VNEG.  Try the VFP scalar encodings first; otherwise emit
   the vector form, after checking predication/architecture.  */
static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  if (check_simd_pred_availability (et.type == NT_float,
				    NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  /* Bit 10 selects the floating-point variant.  */
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
16178
16179 static void
16180 do_neon_sli (void)
16181 {
16182 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16183 struct neon_type_el et = neon_check_type (2, rs,
16184 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
16185 int imm = inst.operands[2].imm;
16186 constraint (imm < 0 || (unsigned)imm >= et.size,
16187 _("immediate out of range for insert"));
16188 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
16189 }
16190
16191 static void
16192 do_neon_sri (void)
16193 {
16194 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16195 struct neon_type_el et = neon_check_type (2, rs,
16196 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
16197 int imm = inst.operands[2].imm;
16198 constraint (imm < 1 || (unsigned)imm > et.size,
16199 _("immediate out of range for insert"));
16200 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
16201 }
16202
/* Encode VQSHLU: saturating shift left with unsigned result from signed
   operands and an immediate shift count in [0, size - 1].  */
static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
16219
16220 static void
16221 do_neon_qmovn (void)
16222 {
16223 struct neon_type_el et = neon_check_type (2, NS_DQ,
16224 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
16225 /* Saturating move where operands can be signed or unsigned, and the
16226 destination has the same signedness. */
16227 NEON_ENCODE (INTEGER, inst);
16228 if (et.type == NT_unsigned)
16229 inst.instruction |= 0xc0;
16230 else
16231 inst.instruction |= 0x80;
16232 neon_two_same (0, 1, et.size / 2);
16233 }
16234
16235 static void
16236 do_neon_qmovun (void)
16237 {
16238 struct neon_type_el et = neon_check_type (2, NS_DQ,
16239 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
16240 /* Saturating move with unsigned results. Operands must be signed. */
16241 NEON_ENCODE (INTEGER, inst);
16242 neon_two_same (0, 1, et.size / 2);
16243 }
16244
/* Encode VQSHRN/VQRSHRN: saturating shift right and narrow.  A zero
   shift count is a synonym for VQMOVN and is redirected there.  */
static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* The shift count is encoded as size - count.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
16271
/* Encode VQSHRUN/VQRSHRUN: saturating shift right and narrow, unsigned
   result from signed operands.  A zero shift count is a synonym for
   VQMOVUN and is redirected there.  */
static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
16301
16302 static void
16303 do_neon_movn (void)
16304 {
16305 struct neon_type_el et = neon_check_type (2, NS_DQ,
16306 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
16307 NEON_ENCODE (INTEGER, inst);
16308 neon_two_same (0, 1, et.size / 2);
16309 }
16310
/* Encode VSHRN/VRSHRN: shift right and narrow.  A zero shift count is a
   pseudo for VMOVN and is redirected there.  */
static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  /* The shift count is encoded as size - count.  */
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
16335
/* Encode VSHLL: shift left long.  The maximum-shift form (count equal to
   the element size) has a distinct encoding from the general immediate
   form.  */
static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
16365
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

/* X-macro table of every conversion flavour.  Each CVT_VAR entry gives:
   the flavour name suffix, destination type bits, source type bits,
   extra regtype bits (whole_reg/key are locals at the expansion site in
   get_neon_cvt_flavour), and the legacy VFP opcode names for the
   bitshift (fixed-point), plain and round-to-zero forms — NULL where no
   such VFP encoding exists.  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */ \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
16404
/* Expand the flavour table into enumerator names.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* First enumerator that is a VFP-only (non-Neon) conversion.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
16417
/* Determine the conversion flavour of the current instruction by trying
   each CVT_VAR type pair in turn until neon_check_type accepts one.
   Returns neon_cvt_flavour_invalid if none matches.  */
static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
  /* Each expansion probes one type pair; any failed probe's error is
     cleared once a later pair succeeds.  */
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
16443
/* Rounding-mode suffixes used by the VCVT{A,N,P,M} and VRINT family of
   instructions (see the rounding-mode switch in do_vfp_nsyn_cvt_fpv8).  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,
  neon_cvt_mode_n,
  neon_cvt_mode_p,
  neon_cvt_mode_m,
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
16454
/* Neon-syntax VFP conversions.  */

/* Emit a VFP conversion for shape RS and conversion FLAVOUR by looking
   up the legacy VFP opcode name and assembling it via
   do_vfp_nsyn_opcode.  Shapes with an immediate use the bitshift
   (fixed-point) opcode column of the flavour table; others use the
   plain column.  */
static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The fixed-point forms operate in place on one register, so
	     shift the operands down accordingly.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  /* NULL table entries mean no VFP encoding exists for this flavour.  */
  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
16508
/* Emit the round-towards-zero variant of a VFP conversion, if one exists
   for the detected flavour (the ZN column of the flavour table).  */
static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  /* NULL entries have no round-to-zero VFP form; emit nothing then.  */
  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
16525
/* Encode an FP v8 VCVT{A,N,P,M} float-to-integer conversion in its
   scalar (VFP) form.  FLAVOUR selects operand types; MODE must be one
   of a/n/p/m.  The instruction is emitted unconditionally (0xF
   condition field) and must sit outside any IT/VPT block.  */

static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* Half-precision sources need the ARMv8.2 FP16 extension.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
		_(BAD_FP16));

  set_pred_insn_type (OUTSIDE_PRED_INSN);

  /* SZ (bit 8) selects a double-precision source; OP (bit 7) selects a
     signed destination.  */
  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f16:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f16:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  /* RM (bits 17-16) encodes the rounding mode.  */
  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      ||flavour == neon_cvt_flavour_u32_f16)
    do_scalar_fp16_v82_encode ();
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  /* Unconditional encoding.  */
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
16601
/* Worker for the whole VCVT family.  Picks an operand shape, derives
   the conversion flavour, then dispatches between the VFP encoders
   above and the Advanced SIMD encodings handled inline below.  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	unsigned immbits;
	/* Opcode bits for the fixed-point forms, indexed by
	   enum neon_cvt_flavour.  */
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* 32-bit elements.  NOTE: bit 21 was already set just above,
	       so this OR is redundant (a no-op).  */
	    inst.instruction |= 1 << 21;
	    /* Fraction bits are encoded as 32 - imm (bits 21-16).  */
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    /* 16-bit elements: fraction bits encoded as 16 - imm.  */
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      /* The rounding-mode variants (VCVT{A,N,P,M}) take the ARMv8
	 Advanced SIMD encoding; plain/round-to-zero forms fall through
	 to the integer encoding below.  */
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  NEON_ENCODE (FLOAT, inst);
	  set_pred_insn_type (OUTSIDE_PRED_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  {
	    /* Opcode bits for the integer forms, indexed by
	       enum neon_cvt_flavour.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      /* VCVT.F16.F32 (NS_DQ) narrows; VCVT.F32.F16 (NS_QD) widens.  */
      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
16795
/* VCVTR: conversion using mode "x" (see do_neon_cvt_1).  */

static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
16801
/* Plain VCVT: round-towards-zero mode.  */

static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
16807
/* VCVTA.  */

static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
16813
/* VCVTN.  */

static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
16819
/* VCVTP.  */

static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
16825
/* VCVTM.  */

static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
16831
16832 static void
16833 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
16834 {
16835 if (is_double)
16836 mark_feature_used (&fpu_vfp_ext_armv8);
16837
16838 encode_arm_vfp_reg (inst.operands[0].reg,
16839 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
16840 encode_arm_vfp_reg (inst.operands[1].reg,
16841 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
16842 inst.instruction |= to ? 0x10000 : 0;
16843 inst.instruction |= t ? 0x80 : 0;
16844 inst.instruction |= is_double ? 0x100 : 0;
16845 do_vfp_cond_or_thumb ();
16846 }
16847
/* Worker for VCVTB/VCVTT.  Tries each legal type combination in turn;
   a failed neon_check_type probe sets inst.error, which we clear before
   trying the next combination.  T distinguishes VCVTT from VCVTB.  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* F32 -> F16 (single precision).  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* F16 -> F32 (single precision).  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    return;
}
16889
/* VCVTB: convert using the bottom half of the half-precision pair.  */

static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
16895
16896
/* VCVTT: convert using the top half of the half-precision pair.  */

static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
16902
/* Encode the immediate forms of VMOV/VMVN.  If the immediate cannot be
   represented directly, try the bit-inverted value with the opposite
   mnemonic (VMOV <-> VMVN) before giving up.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  /* A 64-bit immediate carries its high word in operands[1].reg.  */
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the OP bit (bit 5) with the possibly-flipped value.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
16954
/* Encode VMVN: register form (bitwise NOT of a vector) or immediate
   form (delegated to neon_move_immediate).  */

static void
do_neon_mvn (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      NEON_ENCODE (IMMED, inst);
      neon_move_immediate ();
    }

  neon_dp_fixup (&inst);
}
16977
16978 /* Encode instructions of form:
16979
16980 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16981 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16982
16983 static void
16984 neon_mixed_length (struct neon_type_el et, unsigned size)
16985 {
16986 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16987 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16988 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16989 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16990 inst.instruction |= LOW4 (inst.operands[2].reg);
16991 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16992 inst.instruction |= (et.type == NT_unsigned) << 24;
16993 inst.instruction |= neon_logbits (size) << 20;
16994
16995 neon_dp_fixup (&inst);
16996 }
16997
/* Encode VADDL/VSUBL/VABDL.  For plain Neon this is the QDD lengthening
   form; for MVE a mnemonic ending in l with an e/t suffix is really the
   non-long instruction predicated LE/LT, so rewrite it and re-dispatch.  */

static void
do_neon_dyadic_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_QDD, NS_QQQ, NS_QQR, NS_NULL);
  if (rs == NS_QDD)
    {
      if (vfp_or_neon_is_neon (NEON_CHECK_ARCH | NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (INTEGER, inst);
      /* FIXME: Type checking for lengthening op.  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
      neon_mixed_length (et, et.size);
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (inst.cond == 0xf || inst.cond == 0x10))
    {
      /* If parsing for MVE, vaddl/vsubl/vabdl{e,t} can only be vadd/vsub/vabd
	 in an IT block with le/lt conditions.  */

      /* Rewrite the parsed condition: 0xf -> LT (0xb), 0x10 -> LE (0xd).  */
      if (inst.cond == 0xf)
	inst.cond = 0xb;
      else if (inst.cond == 0x10)
	inst.cond = 0xd;

      inst.pred_insn_type = INSIDE_IT_INSN;

      /* Swap the mnemonic for its non-long counterpart and re-encode.  */
      if (inst.instruction == N_MNEM_vaddl)
	{
	  inst.instruction = N_MNEM_vadd;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vsubl)
	{
	  inst.instruction = N_MNEM_vsub;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vabdl)
	{
	  inst.instruction = N_MNEM_vabd;
	  do_neon_dyadic_if_su ();
	}
    }
  else
    first_error (BAD_FPU);
}
17045
/* Encode VABAL (absolute difference and accumulate, long).  */

static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
17053
/* Encode a long multiply-accumulate whose last operand is either a
   scalar or a whole register.  NOTE(review): the parameter names appear
   swapped relative to their use -- REGTYPES constrains the scalar
   (QDS) branch and SCALARTYPES the register (QDD) branch; confirm
   against the callers before renaming.  */

static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      /* Scalar (indexed) final operand.  */
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      /* Whole-register final operand.  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
17072
/* VMLAL/VMLSL/VMULL-style encoders: 16/32-bit scalars, any S/U
   register element size.  */

static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
17078
17079 /* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
17080 internal SCALAR. QUAD_P is 1 if it's for Q format, otherwise it's 0. */
17081
/* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
   internal SCALAR.  QUAD_P is 1 if it's for Q format, otherwise it's 0.
   On an out-of-range scalar, report an error and return 0.  */

static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned reg = NEON_SCALAR_REG (scalar);
  unsigned idx = NEON_SCALAR_INDEX (scalar);

  if (quad_p)
    {
      /* Q form: 3-bit register, 2-bit index split across bits 3 and 5.  */
      if (reg <= 7 && idx <= 3)
	return ((reg & 0x7)
		| ((idx & 0x1) << 3)
		| (((idx >> 1) & 0x1) << 5));
    }
  else
    {
      /* D form: 4-bit register split across bits 0-2 and 5, 1-bit index.  */
      if (reg <= 15 && idx <= 1)
	return (((reg & 0x1) << 5)
		| ((reg >> 1) & 0x7)
		| ((idx & 0x1) << 3));
    }

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
17111
/* Encode VFMAL (SUBTYPE == 0) or VFMSL (SUBTYPE == 1), the FP16
   fused multiply-accumulate long instructions, in either three-same
   register form or scalar-indexed form.  */

static void
do_neon_fmac_maybe_scalar_long (int subtype)
{
  enum neon_shape rs;
  int high8;
  /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding. 'size"
     field (bits[21:20]) has different meaning. For scalar index variant, it's
     used to differentiate add and subtract, otherwise it's with fixed value
     0x2.  */
  int size = -1;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
	       "behaviour is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
	      _(BAD_FP16));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
     be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      high8 = 0xfe000000;
      if (subtype)
	size = 16;
      rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      size = 32;
      if (subtype)
	inst.instruction |= (0x1 << 23);
      rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
    }

  neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16);

  /* "opcode" from template has included "ubit", so simply pass 0 here. Also,
     the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
     so we simply pass -1 as size.  */
  unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
  neon_three_same (quad_p, 0, size);

  /* Undo neon_dp_fixup.  Redo the high eight bits.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;

#define LOW1(R) ((R) & 0x1)
#define HI4(R) (((R) >> 1) & 0xf)
  /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
     whether the instruction is in Q form and whether Vm is a scalar indexed
     operand.  */
  if (inst.operands[2].isscalar)
    {
      unsigned rm
	= neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
      inst.instruction &= 0xffffffd0;
      inst.instruction |= rm;

      if (!quad_p)
	{
	  /* Redo Rn as well.  */
	  inst.instruction &= 0xfff0ff7f;
	  inst.instruction |= HI4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
	}
    }
  else if (!quad_p)
    {
      /* Redo Rn and Rm.  */
      inst.instruction &= 0xfff0ff50;
      inst.instruction |= HI4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI4 (inst.operands[2].reg);
      inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
    }
}
17193
/* Encode VFMAL.  */

static void
do_neon_vfmal (void)
{
  /* Was `return do_neon_fmac_maybe_scalar_long (0);' -- returning a void
     expression from a void function is an ISO C constraint violation
     (C11 6.8.6.4), so just call the helper.  */
  do_neon_fmac_maybe_scalar_long (0);
}
17199
/* Encode VFMSL.  */

static void
do_neon_vfmsl (void)
{
  /* Was `return do_neon_fmac_maybe_scalar_long (1);' -- returning a void
     expression from a void function is an ISO C constraint violation
     (C11 6.8.6.4), so just call the helper.  */
  do_neon_fmac_maybe_scalar_long (1);
}
17205
/* Encode the wide form (VADDW/VSUBW): Q = Q op D.  */

static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
17213
/* Encode the narrowing form: D = Q op Q, halving the element size.  */

static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
17224
/* Saturating long multiplies (VQDMULL etc.): signed 16/32-bit only.  */

static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
17230
/* Encode VMULL, including the polynomial variants (P8, and P64 on
   crypto-capable ARMv8).  Scalar final operands go through the common
   long multiply-accumulate path.  */

static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Force the 0b10 size encoding for P64.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
17262
/* Encode VEXT.  The assembly immediate counts elements; it is scaled
   to a byte index for the encoding and must stay within the register.  */

static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Convert the element index into a byte offset.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  /* 8 bytes in a D register, 16 in a Q register.  */
  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
17284
/* Encode VREV16/VREV32/VREV64.  */

static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  /* OP comes from bits 8-7 of the opcode template.  */
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
17301
/* Encode VDUP: either broadcast one scalar lane to a whole vector, or
   broadcast an ARM core register to every lane.  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar source: VDUP Dd/Qd, Dm[x].  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Index and size share the imm4 field (bits 19-16): the index sits
	 above the single size bit.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* ARM core register source: VDUP Dd/Qd, Rm.  */
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      /* B:E bits (22 and 5) select the element size.  */
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
17352
17353 /* VMOV has particularly many variations. It can be one of:
17354 0. VMOV<c><q> <Qd>, <Qm>
17355 1. VMOV<c><q> <Dd>, <Dm>
17356 (Register operations, which are VORR with Rm = Rn.)
17357 2. VMOV<c><q>.<dt> <Qd>, #<imm>
17358 3. VMOV<c><q>.<dt> <Dd>, #<imm>
17359 (Immediate loads.)
17360 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
17361 (ARM register to scalar.)
17362 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
17363 (Two ARM registers to vector.)
17364 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
17365 (Scalar to ARM register.)
17366 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
17367 (Vector to two ARM registers.)
17368 8. VMOV.F32 <Sd>, <Sm>
17369 9. VMOV.F64 <Dd>, <Dm>
17370 (VFP register moves.)
17371 10. VMOV.F32 <Sd>, #imm
17372 11. VMOV.F64 <Dd>, #imm
17373 (VFP float immediate load.)
17374 12. VMOV <Rd>, <Sm>
17375 (VFP single to ARM reg.)
17376 13. VMOV <Sd>, <Rm>
17377 (ARM reg to VFP single.)
17378 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
17379 (Two ARM regs to two VFP singles.)
17380 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
17381 (Two VFP singles to two ARM regs.)
17382
17383 These cases can be disambiguated using neon_select_shape, except cases 1/9
17384 and 3/11 which depend on the operand type too.
17385
17386 All the encoded bits are hardcoded by this function.
17387
17388 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
17389 Cases 5, 7 may be used with VFPv2 and above.
17390
17391 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
17392 can specify a type where it doesn't make sense to, and is ignored). */
17393
17394 static void
17395 do_neon_mov (void)
17396 {
17397 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
17398 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
17399 NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
17400 NS_HR, NS_RH, NS_HI, NS_NULL);
17401 struct neon_type_el et;
17402 const char *ldconst = 0;
17403
17404 switch (rs)
17405 {
17406 case NS_DD: /* case 1/9. */
17407 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
17408 /* It is not an error here if no type is given. */
17409 inst.error = NULL;
17410 if (et.type == NT_float && et.size == 64)
17411 {
17412 do_vfp_nsyn_opcode ("fcpyd");
17413 break;
17414 }
17415 /* fall through. */
17416
17417 case NS_QQ: /* case 0/1. */
17418 {
17419 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
17420 return;
17421 /* The architecture manual I have doesn't explicitly state which
17422 value the U bit should have for register->register moves, but
17423 the equivalent VORR instruction has U = 0, so do that. */
17424 inst.instruction = 0x0200110;
17425 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17426 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17427 inst.instruction |= LOW4 (inst.operands[1].reg);
17428 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
17429 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
17430 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
17431 inst.instruction |= neon_quad (rs) << 6;
17432
17433 neon_dp_fixup (&inst);
17434 }
17435 break;
17436
17437 case NS_DI: /* case 3/11. */
17438 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
17439 inst.error = NULL;
17440 if (et.type == NT_float && et.size == 64)
17441 {
17442 /* case 11 (fconstd). */
17443 ldconst = "fconstd";
17444 goto encode_fconstd;
17445 }
17446 /* fall through. */
17447
17448 case NS_QI: /* case 2/3. */
17449 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
17450 return;
17451 inst.instruction = 0x0800010;
17452 neon_move_immediate ();
17453 neon_dp_fixup (&inst);
17454 break;
17455
17456 case NS_SR: /* case 4. */
17457 {
17458 unsigned bcdebits = 0;
17459 int logsize;
17460 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
17461 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
17462
17463 /* .<size> is optional here, defaulting to .32. */
17464 if (inst.vectype.elems == 0
17465 && inst.operands[0].vectype.type == NT_invtype
17466 && inst.operands[1].vectype.type == NT_invtype)
17467 {
17468 inst.vectype.el[0].type = NT_untyped;
17469 inst.vectype.el[0].size = 32;
17470 inst.vectype.elems = 1;
17471 }
17472
17473 et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
17474 logsize = neon_logbits (et.size);
17475
17476 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
17477 _(BAD_FPU));
17478 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
17479 && et.size != 32, _(BAD_FPU));
17480 constraint (et.type == NT_invtype, _("bad type for scalar"));
17481 constraint (x >= 64 / et.size, _("scalar index out of range"));
17482
17483 switch (et.size)
17484 {
17485 case 8: bcdebits = 0x8; break;
17486 case 16: bcdebits = 0x1; break;
17487 case 32: bcdebits = 0x0; break;
17488 default: ;
17489 }
17490
17491 bcdebits |= x << logsize;
17492
17493 inst.instruction = 0xe000b10;
17494 do_vfp_cond_or_thumb ();
17495 inst.instruction |= LOW4 (dn) << 16;
17496 inst.instruction |= HI1 (dn) << 7;
17497 inst.instruction |= inst.operands[1].reg << 12;
17498 inst.instruction |= (bcdebits & 3) << 5;
17499 inst.instruction |= (bcdebits >> 2) << 21;
17500 }
17501 break;
17502
17503 case NS_DRR: /* case 5 (fmdrr). */
17504 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
17505 _(BAD_FPU));
17506
17507 inst.instruction = 0xc400b10;
17508 do_vfp_cond_or_thumb ();
17509 inst.instruction |= LOW4 (inst.operands[0].reg);
17510 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
17511 inst.instruction |= inst.operands[1].reg << 12;
17512 inst.instruction |= inst.operands[2].reg << 16;
17513 break;
17514
17515 case NS_RS: /* case 6. */
17516 {
17517 unsigned logsize;
17518 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
17519 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
17520 unsigned abcdebits = 0;
17521
17522 /* .<dt> is optional here, defaulting to .32. */
17523 if (inst.vectype.elems == 0
17524 && inst.operands[0].vectype.type == NT_invtype
17525 && inst.operands[1].vectype.type == NT_invtype)
17526 {
17527 inst.vectype.el[0].type = NT_untyped;
17528 inst.vectype.el[0].size = 32;
17529 inst.vectype.elems = 1;
17530 }
17531
17532 et = neon_check_type (2, NS_NULL,
17533 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
17534 logsize = neon_logbits (et.size);
17535
17536 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
17537 _(BAD_FPU));
17538 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
17539 && et.size != 32, _(BAD_FPU));
17540 constraint (et.type == NT_invtype, _("bad type for scalar"));
17541 constraint (x >= 64 / et.size, _("scalar index out of range"));
17542
17543 switch (et.size)
17544 {
17545 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
17546 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
17547 case 32: abcdebits = 0x00; break;
17548 default: ;
17549 }
17550
17551 abcdebits |= x << logsize;
17552 inst.instruction = 0xe100b10;
17553 do_vfp_cond_or_thumb ();
17554 inst.instruction |= LOW4 (dn) << 16;
17555 inst.instruction |= HI1 (dn) << 7;
17556 inst.instruction |= inst.operands[0].reg << 12;
17557 inst.instruction |= (abcdebits & 3) << 5;
17558 inst.instruction |= (abcdebits >> 2) << 21;
17559 }
17560 break;
17561
17562 case NS_RRD: /* case 7 (fmrrd). */
17563 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
17564 _(BAD_FPU));
17565
17566 inst.instruction = 0xc500b10;
17567 do_vfp_cond_or_thumb ();
17568 inst.instruction |= inst.operands[0].reg << 12;
17569 inst.instruction |= inst.operands[1].reg << 16;
17570 inst.instruction |= LOW4 (inst.operands[2].reg);
17571 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
17572 break;
17573
17574 case NS_FF: /* case 8 (fcpys). */
17575 do_vfp_nsyn_opcode ("fcpys");
17576 break;
17577
17578 case NS_HI:
17579 case NS_FI: /* case 10 (fconsts). */
17580 ldconst = "fconsts";
17581 encode_fconstd:
17582 if (!inst.operands[1].immisfloat)
17583 {
17584 unsigned new_imm;
17585 /* Immediate has to fit in 8 bits so float is enough. */
17586 float imm = (float) inst.operands[1].imm;
17587 memcpy (&new_imm, &imm, sizeof (float));
17588 /* But the assembly may have been written to provide an integer
17589 bit pattern that equates to a float, so check that the
17590 conversion has worked. */
17591 if (is_quarter_float (new_imm))
17592 {
17593 if (is_quarter_float (inst.operands[1].imm))
17594 as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));
17595
17596 inst.operands[1].imm = new_imm;
17597 inst.operands[1].immisfloat = 1;
17598 }
17599 }
17600
17601 if (is_quarter_float (inst.operands[1].imm))
17602 {
17603 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
17604 do_vfp_nsyn_opcode (ldconst);
17605
17606 /* ARMv8.2 fp16 vmov.f16 instruction. */
17607 if (rs == NS_HI)
17608 do_scalar_fp16_v82_encode ();
17609 }
17610 else
17611 first_error (_("immediate out of range"));
17612 break;
17613
17614 case NS_RH:
17615 case NS_RF: /* case 12 (fmrs). */
17616 do_vfp_nsyn_opcode ("fmrs");
17617 /* ARMv8.2 fp16 vmov.f16 instruction. */
17618 if (rs == NS_RH)
17619 do_scalar_fp16_v82_encode ();
17620 break;
17621
17622 case NS_HR:
17623 case NS_FR: /* case 13 (fmsr). */
17624 do_vfp_nsyn_opcode ("fmsr");
17625 /* ARMv8.2 fp16 vmov.f16 instruction. */
17626 if (rs == NS_HR)
17627 do_scalar_fp16_v82_encode ();
17628 break;
17629
17630 /* The encoders for the fmrrs and fmsrr instructions expect three operands
17631 (one of which is a list), but we have parsed four. Do some fiddling to
17632 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
17633 expect. */
17634 case NS_RRFF: /* case 14 (fmrrs). */
17635 constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
17636 _("VFP registers must be adjacent"));
17637 inst.operands[2].imm = 2;
17638 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
17639 do_vfp_nsyn_opcode ("fmrrs");
17640 break;
17641
17642 case NS_FFRR: /* case 15 (fmsrr). */
17643 constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
17644 _("VFP registers must be adjacent"));
17645 inst.operands[1] = inst.operands[2];
17646 inst.operands[2] = inst.operands[3];
17647 inst.operands[0].imm = 2;
17648 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
17649 do_vfp_nsyn_opcode ("fmsrr");
17650 break;
17651
17652 case NS_NULL:
17653 /* neon_select_shape has determined that the instruction
17654 shape is wrong and has already set the error message. */
17655 break;
17656
17657 default:
17658 abort ();
17659 }
17660 }
17661
17662 static void
17663 do_neon_rshift_round_imm (void)
17664 {
17665 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
17666 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
17667 int imm = inst.operands[2].imm;
17668
17669 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
17670 if (imm == 0)
17671 {
17672 inst.operands[2].present = 0;
17673 do_neon_mov ();
17674 return;
17675 }
17676
17677 constraint (imm < 1 || (unsigned)imm > et.size,
17678 _("immediate out of range for shift"));
17679 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
17680 et.size - imm);
17681 }
17682
static void
do_neon_movhf (void)
{
  /* Encode the ARMv8.2 scalar fp16 VMOV.F16 <Sd>, <Sm> form.  */
  enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
  constraint (rs != NS_HH, _("invalid suffix"));

  /* A condition cannot be encoded: warn only in Thumb mode (the
     condition may come from surrounding state), hard error in ARM
     mode.  */
  if (inst.cond != COND_ALWAYS)
    {
      if (thumb_mode)
	{
	  as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
		     " the behaviour is UNPREDICTABLE"));
	}
      else
	{
	  inst.error = BAD_COND;
	  return;
	}
    }

  /* Encode as the single-precision monadic form, then mark as Neon and
     set the top nibble used by the unconditional encoding.  */
  do_vfp_sp_monadic ();

  inst.is_neon = 1;
  inst.instruction |= 0xf0000000;
}
17708
static void
do_neon_movl (void)
{
  /* Encode VMOVL: Q destination from D source, with destination
     elements double the source element width (N_DBL).  */
  struct neon_type_el et = neon_check_type (2, NS_QD,
    N_EQK | N_DBL, N_SU_32 | N_KEY);
  /* Element size in bytes (1, 2 or 4) is placed at bit 19.  */
  unsigned sizebits = et.size >> 3;
  inst.instruction |= sizebits << 19;
  neon_two_same (0, et.type == NT_unsigned, -1);
}
17718
17719 static void
17720 do_neon_trn (void)
17721 {
17722 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17723 struct neon_type_el et = neon_check_type (2, rs,
17724 N_EQK, N_8 | N_16 | N_32 | N_KEY);
17725 NEON_ENCODE (INTEGER, inst);
17726 neon_two_same (neon_quad (rs), 1, et.size);
17727 }
17728
static void
do_neon_zip_uzp (void)
{
  /* Encode the zip/unzip (interleave/de-interleave) instructions.  */
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
17744
17745 static void
17746 do_neon_sat_abs_neg (void)
17747 {
17748 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17749 struct neon_type_el et = neon_check_type (2, rs,
17750 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
17751 neon_two_same (neon_quad (rs), 1, et.size);
17752 }
17753
static void
do_neon_pair_long (void)
{
  /* Encode the pairwise-long add/accumulate operations.  */
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instruction.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (neon_quad (rs), 1, et.size);
}
17763
static void
do_neon_recip_est (void)
{
  /* Encode the reciprocal(-sqrt) estimate instructions: float
     (f16/f32) or unsigned 32-bit element variants.  */
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
  /* Bit 8 distinguishes the floating-point variant.  */
  inst.instruction |= (et.type == NT_float) << 8;
  neon_two_same (neon_quad (rs), 1, et.size);
}
17773
17774 static void
17775 do_neon_cls (void)
17776 {
17777 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17778 struct neon_type_el et = neon_check_type (2, rs,
17779 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
17780 neon_two_same (neon_quad (rs), 1, et.size);
17781 }
17782
17783 static void
17784 do_neon_clz (void)
17785 {
17786 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17787 struct neon_type_el et = neon_check_type (2, rs,
17788 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
17789 neon_two_same (neon_quad (rs), 1, et.size);
17790 }
17791
17792 static void
17793 do_neon_cnt (void)
17794 {
17795 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17796 struct neon_type_el et = neon_check_type (2, rs,
17797 N_EQK | N_INT, N_8 | N_KEY);
17798 neon_two_same (neon_quad (rs), 1, et.size);
17799 }
17800
17801 static void
17802 do_neon_swp (void)
17803 {
17804 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17805 neon_two_same (neon_quad (rs), 1, -1);
17806 }
17807
static void
do_neon_tbl_tbx (void)
{
  /* Encode VTBL/VTBX: table lookup using a list of 1-4 D registers.  */
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  /* List length minus one is placed in bits [9:8].  */
  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
17831
static void
do_neon_ldm_stm (void)
{
  /* Encode VLDM/VSTM.  Single-precision register lists are delegated
     to the VFP encoder; this path handles double-precision lists.  */
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register occupies two words in the transfer-length field.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
17861
static void
do_neon_ldr_str (void)
{
  /* Encode VLDR/VSTR via the VFP pseudo-opcodes, picking the single-
     or double-precision form from the parsed operand.  */
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
	inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
	as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");

      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
      if (inst.vectype.el[0].size == 16)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
17898
static void
do_t_vldr_vstr_sysreg (void)
{
  /* Encode the T32 VLDR/VSTR (system register) form.  */
  int fp_vldr_bitno = 20, sysreg_vldr_bitno = 20;
  bfd_boolean is_vldr = ((inst.instruction & (1 << fp_vldr_bitno)) != 0);

  /* Use of PC is UNPREDICTABLE.  */
  if (inst.operands[1].reg == REG_PC)
    inst.error = _("Use of PC here is UNPREDICTABLE");

  /* Only immediate-offset addressing from a base register is valid.  */
  if (inst.operands[1].immisreg)
    inst.error = _("instruction does not accept register index");

  if (!inst.operands[1].isreg)
    inst.error = _("instruction does not accept PC-relative addressing");

  /* The offset immediate must fit in 7 bits.  */
  if (abs (inst.operands[1].imm) >= (1 << 7))
    inst.error = _("immediate value out of range");

  inst.instruction = 0xec000f80;
  if (is_vldr)
    inst.instruction |= 1 << sysreg_vldr_bitno;
  encode_arm_cp_address (1, TRUE, FALSE, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM);
  /* The system-register number is split: low 3 bits go to bits [15:13],
     bit 3 goes to bit 22.  */
  inst.instruction |= (inst.operands[0].imm & 0x7) << 13;
  inst.instruction |= (inst.operands[0].imm & 0x8) << 19;
}
17925
17926 static void
17927 do_vldr_vstr (void)
17928 {
17929 bfd_boolean sysreg_op = !inst.operands[0].isreg;
17930
17931 /* VLDR/VSTR (System Register). */
17932 if (sysreg_op)
17933 {
17934 if (!mark_feature_used (&arm_ext_v8_1m_main))
17935 as_bad (_("Instruction not permitted on this architecture"));
17936
17937 do_t_vldr_vstr_sysreg ();
17938 }
17939 /* VLDR/VSTR. */
17940 else
17941 {
17942 if (!mark_feature_used (&fpu_vfp_ext_v1xd))
17943 as_bad (_("Instruction not permitted on this architecture"));
17944 do_neon_ldr_str ();
17945 }
17946 }
17947
17948 /* "interleave" version also handles non-interleaving register VLD1/VST1
17949 instructions. */
17950
static void
do_neon_ld_st_interleave (void)
{
  /* Fill in the type and alignment fields of the interleaving (and
     plain register-list) VLD<n>/VST<n> forms.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
       -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2.  */
       -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3.  */
       -1, -1, -1, -1, -1, -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* The parsed alignment in bits lives in the top half of the operand's
     imm; translate it to the 2-bit align field, validating it against
     the register-list length where required.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
18016
18017 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
18018 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
18019 otherwise. The variable arguments are a list of pairs of legal (size, align)
18020 values, terminated with -1. */
18021
static int
neon_alignment_bit (int size, int align, int *do_alignment, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  /* No alignment specifier was parsed: nothing to encode.  */
  if (!inst.operands[1].immisalign)
    {
      *do_alignment = 0;
      return SUCCESS;
    }

  va_start (ap, do_alignment);

  /* Walk the variadic (size, align) pairs until the -1 terminator,
     looking for an exact match.  */
  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
	break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
	result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_alignment = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
18057
static void
do_neon_ld_st_lane (void)
{
  /* Encode the single-element (to/from one lane) VLD<n>/VST<n> forms:
     lane number, alignment and register stride.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;	/* <n> minus one, from bits [9:8].  */
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Each <n> accepts a different set of (size, alignment) pairs, and
     encodes the chosen alignment differently in bits [7:4].  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
				       16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
18142
18143 /* Encode single n-element structure to all lanes VLD<n> instructions. */
18144
static void
do_neon_ld_dup (void)
{
  /* Per-<n> validation of list length, register stride and alignment,
     followed by the size/alignment field encoding.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* A two-register list sets bit 5.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment use a distinct size
	   field encoding (0x3).  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* The alignment flag itself lives in bit 4.  */
  inst.instruction |= do_alignment << 4;
}
18217
18218 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
18219 apart from bits [11:4]. */
18220
18221 static void
18222 do_neon_ldx_stx (void)
18223 {
18224 if (inst.operands[1].isreg)
18225 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
18226
18227 switch (NEON_LANE (inst.operands[0].imm))
18228 {
18229 case NEON_INTERLEAVE_LANES:
18230 NEON_ENCODE (INTERLV, inst);
18231 do_neon_ld_st_interleave ();
18232 break;
18233
18234 case NEON_ALL_LANES:
18235 NEON_ENCODE (DUP, inst);
18236 if (inst.instruction == N_INV)
18237 {
18238 first_error ("only loads support such operands");
18239 break;
18240 }
18241 do_neon_ld_dup ();
18242 break;
18243
18244 default:
18245 NEON_ENCODE (LANE, inst);
18246 do_neon_ld_st_lane ();
18247 }
18248
18249 /* L bit comes from bit mask. */
18250 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
18251 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
18252 inst.instruction |= inst.operands[1].reg << 16;
18253
18254 if (inst.operands[1].postind)
18255 {
18256 int postreg = inst.operands[1].imm & 0xf;
18257 constraint (!inst.operands[1].immisreg,
18258 _("post-index must be a register"));
18259 constraint (postreg == 0xd || postreg == 0xf,
18260 _("bad register for post-index"));
18261 inst.instruction |= postreg;
18262 }
18263 else
18264 {
18265 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
18266 constraint (inst.relocs[0].exp.X_op != O_constant
18267 || inst.relocs[0].exp.X_add_number != 0,
18268 BAD_ADDR_MODE);
18269
18270 if (inst.operands[1].writeback)
18271 {
18272 inst.instruction |= 0xd;
18273 }
18274 else
18275 inst.instruction |= 0xf;
18276 }
18277
18278 if (thumb_mode)
18279 inst.instruction |= 0xf9000000;
18280 else
18281 inst.instruction |= 0xf4000000;
18282 }
18283
18284 /* FP v8. */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Encode an FP v8 three-operand VFP instruction for shape RS.  */
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    do_vfp_dp_rd_rn_rm ();

  /* Double-precision shape sets the size bit.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  /* Set the top nibble used by the unconditional FP v8 encodings.  */
  inst.instruction |= 0xf0000000;
}
18312
18313 static void
18314 do_vsel (void)
18315 {
18316 set_pred_insn_type (OUTSIDE_PRED_INSN);
18317
18318 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
18319 first_error (_("invalid instruction shape"));
18320 }
18321
static void
do_vmaxnm (void)
{
  /* VMAXNM/VMINNM: try the scalar VFP FP v8 encoding first, then fall
     back to the Neon vector form.  */
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
}
18335
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  /* Common encoder for the VRINT family; MODE selects the rounding
     behaviour.  Tries the scalar VFP encoding first, then falls back
     to the Neon vector encoding.  */
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_pred_insn_type (OUTSIDE_PRED_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      /* Rounding-mode bits of the VFP encoding.  */
      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_pred_insn_type (OUTSIDE_PRED_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* Rounding-mode bits of the Neon encoding; the FPSCR-directed
	 mode (r) has no Neon form.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
18427
/* VRINTX (round to integral, mode "x").  */
static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}
18433
/* VRINTZ (round to integral, mode "z": towards zero).  */
static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}
18439
/* VRINTR (round to integral, mode "r"; VFP encoding only).  */
static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}
18445
/* VRINTA (round to integral, mode "a": to nearest, ties away).  */
static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}
18451
/* VRINTN (round to integral, mode "n": to nearest, ties even).  */
static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}
18457
/* VRINTP (round to integral, mode "p": towards plus infinity).  */
static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}
18463
/* VRINTM (round to integral, mode "m": towards minus infinity).  */
static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
18469
static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  /* Pack the scalar register/index pair of a VCMLA operand into its
     encoded form; report anything that cannot be encoded.  */
  unsigned reg = NEON_SCALAR_REG (opnd);
  unsigned idx = NEON_SCALAR_INDEX (opnd);

  if (elsize == 16)
    {
      /* F16: index (0 or 1) goes in bit 4, above a 4-bit register.  */
      if (idx < 2 && reg < 16)
	return reg | (idx << 4);
    }
  else if (elsize == 32 && idx == 0)
    return reg;

  first_error (_("scalar out of range"));
  return 0;
}
18484
static void
do_vcmla (void)
{
  /* Encode VCMLA (complex multiply-accumulate with rotation).  The
     rotation is parsed as the literal value 0/90/180/270 and encoded
     divided by 90.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));
  rot /= 90;
  if (inst.operands[2].isscalar)
    {
      /* Indexed (by-scalar) form.  */
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      /* Vector form.  */
      enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc200800;
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
18526
static void
do_vcadd (void)
{
  /* Encode VCADD (complex add with rotation); only rotations of 90 and
     270 exist, encoded as a single bit.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				   N_KEY | N_F16 | N_F32).size;
  neon_three_same (neon_quad (rs), 0, -1);
  inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
  inst.instruction |= 0xfc800800;
  inst.instruction |= (rot == 270) << 24;
  inst.instruction |= (size == 32) << 20;
}
18545
18546 /* Dot Product instructions encoding support. */
18547
static void
do_neon_dotproduct (int unsigned_p)
{
  /* Common encoder for the dot-product instructions.  UNSIGNED_P
     selects the unsigned variant (encoded in the U bit); the third
     operand may be a D/Q register or an indexed scalar.  */
  enum neon_shape rs;
  unsigned scalar_oprd2 = 0;
  int high8;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
	       "is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* Dot Product instructions are in three-same D/Q register format or the third
     operand can be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
      high8 = 0xfe000000;
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
    }

  if (unsigned_p)
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
  else
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);

  /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
     Product instruction, so we pass 0 as the "ubit" parameter.  And the
     "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter.  */
  neon_three_same (neon_quad (rs), 0, 32);

  /* Undo neon_dp_fixup.  Dot Product instructions are using a slightly
     different NEON three-same encoding.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;
  /* Encode 'U' bit which indicates signedness.  */
  inst.instruction |= (unsigned_p ? 1 : 0) << 4;
  /* Re-encode operand2 if it's indexed scalar operand.  What has been encoded
     from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
     the instruction encoding.  */
  if (inst.operands[2].isscalar)
    {
      inst.instruction &= 0xffffffd0;
      inst.instruction |= LOW4 (scalar_oprd2);
      inst.instruction |= HI1 (scalar_oprd2) << 5;
    }
}
18602
/* Dot Product instructions for signed integer.  */

static void
do_neon_dotproduct_s (void)
{
  /* Plain call rather than "return <void expression>": ISO C forbids a
     return statement with an expression in a function returning void
     (C99 6.8.6.4), and both functions are void anyway.  */
  do_neon_dotproduct (0);
}
18610
/* Dot Product instructions for unsigned integer.  */

static void
do_neon_dotproduct_u (void)
{
  /* Plain call rather than "return <void expression>": ISO C forbids a
     return statement with an expression in a function returning void
     (C99 6.8.6.4), and both functions are void anyway.  */
  do_neon_dotproduct (1);
}
18618
/* Crypto v1 instructions.  */

/* Encode a two-operand (Qd, Qm) crypto instruction.  ELTTYPE is the
   required element type of the operands (e.g. N_8, N_32); OP is the
   value of the op field placed at bit 6, or -1 if the instruction has
   no such field.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  /* Type check passed; discard any error recorded along the way.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  /* Qd goes in bits 12-15 and 22; Qm in bits 0-3 and 5.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  /* Select the Thumb or ARM top byte of the unconditional encoding.  */
  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
18644
/* Encode a three-operand (Qd, Qn, Qm) crypto instruction.  U is the
   value of the U bit; OP selects the size argument passed to
   neon_three_same (8 << OP).  Operands must be 32-bit elements.  */
static void
do_crypto_3op_1 (int u, int op)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  /* Type check passed; discard any error recorded along the way.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
18659
/* AESE: AES single-round encryption (8-bit elements, op field 0).  */
static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

/* AESD: AES single-round decryption (op field 1).  */
static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

/* AESMC: AES mix columns (op field 2).  */
static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

/* AESIMC: AES inverse mix columns (op field 3).  */
static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}
18683
/* SHA1C: SHA1 hash update, choose part (U = 0, op 0).  */
static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

/* SHA1P: SHA1 hash update, parity part (U = 0, op 1).  */
static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

/* SHA1M: SHA1 hash update, majority part (U = 0, op 2).  */
static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

/* SHA1SU0: SHA1 schedule update 0 (U = 0, op 3).  */
static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

/* SHA256H: SHA256 hash update, part 1 (U = 1, op 0).  */
static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

/* SHA256H2: SHA256 hash update, part 2 (U = 1, op 1).  */
static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

/* SHA256SU1: SHA256 schedule update 1 (U = 1, op 2).  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}
18725
/* SHA1H: SHA1 fixed rotate (32-bit elements, no op field).  */
static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}

/* SHA1SU1: SHA1 schedule update 1 (op field 0).  */
static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

/* SHA256SU0: SHA256 schedule update 0 (op field 1).  */
static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
18743
/* Encode a CRC32 instruction.  POLY is 1 for the CRC32C (Castagnoli)
   variants, 0 for plain CRC32; SZ selects the operand size (0 = byte,
   1 = halfword, 2 = word).  The size and poly fields, and Rd, sit at
   different bit positions in the ARM and Thumb encodings.  */
static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  /* r15 in any operand slot is UNPREDICTABLE: warn but still encode.  */
  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
}
18761
/* CRC32B: CRC32 on a byte.  */
static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}

/* CRC32H: CRC32 on a halfword.  */
static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}

/* CRC32W: CRC32 on a word.  */
static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}

/* CRC32CB: CRC32C on a byte.  */
static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}

/* CRC32CH: CRC32C on a halfword.  */
static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}

/* CRC32CW: CRC32C on a word.  */
static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
18797
/* VJCVT: convert double precision to signed 32-bit integer using
   JavaScript-style rounding semantics (hence the 'j').  Requires the
   Armv8 VFP extension.  */
static void
do_vjcvt (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
18807
18808 \f
18809 /* Overall per-instruction processing. */
18810
18811 /* We need to be able to fix up arbitrary expressions in some statements.
18812 This is so that we can handle symbols that are an arbitrary distance from
18813 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
18814 which returns part of an address in a form which will be valid for
18815 a data instruction. We do this by pushing the expression into a symbol
18816 in the expr_section, and creating a fix for that. */
18817
/* Create a fix of SIZE bytes at offset WHERE in FRAG for expression
   EXP.  PC_REL is non-zero for pc-relative fixes; RELOC is the BFD
   relocation code (passed as int to avoid header ordering issues).  */
static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  fixS * new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  /* Rewrite EXP in terms of the new symbol before falling
	     through to the O_symbol case.  */
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex is pushed into an expression symbol.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
18871
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Split the relocation expression into the symbol/offset pair that
     frag_var () wants.  */
  switch (inst.relocs[0].exp.X_op)
    {
    case O_symbol:
      sym = inst.relocs[0].exp.X_add_symbol;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.relocs[0].exp);
      offset = 0;
      break;
    }
  /* Reserve the maximum (ARM) size but emit the narrow (Thumb) form;
     md_convert_frag widens it later if relaxation requires.  */
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
18903
/* Write a 32-bit thumb instruction to buf: Thumb-2 instructions are
   stored as two 16-bit halfwords, most significant halfword first.  */
static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
18911
/* Emit the assembled instruction held in the global `inst' into the
   current frag, together with its relocations and debug line info.
   STR is the original source line, used only for diagnostics.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  /* Instructions subject to relaxation get a variant frag instead of
     being emitted directly.  */
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  /* Zero size: nothing to emit.  */
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb-2: two halfwords, high halfword first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Double-size ARM instruction: same word written twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  /* Turn each pending relocation on this instruction into a fix.  */
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    {
      if (inst.relocs[r].type != BFD_RELOC_UNUSED)
	fix_new_arm (frag_now, to - frag_now->fr_literal,
		     inst.size, & inst.relocs[r].exp, inst.relocs[r].pc_rel,
		     inst.relocs[r].type);
    }

  dwarf2_emit_insn (inst.size);
}
18962
18963 static char *
18964 output_it_inst (int cond, int mask, char * to)
18965 {
18966 unsigned long instruction = 0xbf00;
18967
18968 mask &= 0xf;
18969 instruction |= mask;
18970 instruction |= cond << 4;
18971
18972 if (to == NULL)
18973 {
18974 to = frag_more (2);
18975 #ifdef OBJ_ELF
18976 dwarf2_emit_insn (2);
18977 #endif
18978 }
18979
18980 md_number_to_chars (to, instruction, 2);
18981
18982 return to;
18983 }
18984
/* Tag values used in struct asm_opcode's tag field.  They record where,
   if anywhere, a conditional affix may appear in a mnemonic; see
   opcode_lookup () for how they drive the lookup algorithm.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a scalar
			   conditional suffix, others place 0xF where the
			   condition field would be, others take a vector
			   conditional suffix.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
19019
19020 /* Subroutine of md_assemble, responsible for looking up the primary
19021 opcode from the mnemonic the user wrote. STR points to the
19022 beginning of the mnemonic.
19023
19024 This is not simply a hash table lookup, because of conditional
19025 variants. Most instructions have conditional variants, which are
19026 expressed with a _conditional affix_ to the mnemonic. If we were
19027 to encode each conditional variant as a literal string in the opcode
19028 table, it would have approximately 20,000 entries.
19029
19030 Most mnemonics take this affix as a suffix, and in unified syntax,
19031 'most' is upgraded to 'all'. However, in the divided syntax, some
19032 instructions take the affix as an infix, notably the s-variants of
19033 the arithmetic instructions. Of those instructions, all but six
19034 have the infix appear after the third character of the mnemonic.
19035
19036 Accordingly, the algorithm for looking up primary opcodes given
19037 an identifier is:
19038
19039 1. Look up the identifier in the opcode table.
19040 If we find a match, go to step U.
19041
19042 2. Look up the last two characters of the identifier in the
19043 conditions table. If we find a match, look up the first N-2
19044 characters of the identifier in the opcode table. If we
19045 find a match, go to step CE.
19046
19047 3. Look up the fourth and fifth characters of the identifier in
19048 the conditions table. If we find a match, extract those
19049 characters from the identifier, and look up the remaining
19050 characters in the opcode table. If we find a match, go
19051 to step CM.
19052
19053 4. Fail.
19054
19055 U. Examine the tag field of the opcode structure, in case this is
19056 one of the six instructions with its conditional infix in an
19057 unusual place. If it is, the tag tells us where to find the
19058 infix; look it up in the conditions table and set inst.cond
19059 accordingly. Otherwise, this is an unconditional instruction.
19060 Again set inst.cond accordingly. Return the opcode structure.
19061
19062 CE. Examine the tag field to make sure this is an instruction that
19063 should receive a conditional suffix. If it is not, fail.
19064 Otherwise, set inst.cond from the suffix we already looked up,
19065 and return the opcode structure.
19066
19067 CM. Examine the tag field to make sure this is an instruction that
19068 should receive a conditional infix after the third character.
19069 If it is not, fail. Otherwise, undo the edits to the current
19070 line of input and proceed as for case CE. */
19071
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* Cannot have a one-character vector-predication suffix on a mnemonic
	 of fewer than two characters (the base would be empty).  */
      if (end - base < 2)
	return NULL;
      affix = end - 1;
      cond = (const struct asm_cond *) hash_find_n (arm_vcond_hsh, affix, 1);
      opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
							affix - base);
      /* If this opcode can not be vector predicated then don't accept it with a
	 vector predication code.  */
      if (opcode && !opcode->mayBeVecPred)
	opcode = NULL;
    }
  if (!opcode || !cond)
    {
      /* Cannot have a two-character conditional suffix on a mnemonic of
	 fewer than three characters (the base would be empty).  */
      if (end - base < 3)
	return NULL;

      /* Look for suffixed mnemonic.  */
      affix = end - 2;
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
							affix - base);
    }

  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return NULL;
	  /* Fall through.  */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily squeeze out the two candidate infix characters so the
     remaining mnemonic can be looked up, then restore the input line.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
19246
19247 /* This function generates an initial IT instruction, leaving its block
19248 virtually open for the new instructions. Eventually,
19249 the mask will be updated by now_pred_add_mask () each time
19250 a new instruction needs to be included in the IT block.
19251 Finally, the block is closed with close_automatic_it_block ().
19252 The block closure can be requested either from md_assemble (),
19253 a tencode (), or due to a label hook. */
19254
static void
new_automatic_it_block (int cond)
{
  now_pred.state = AUTOMATIC_PRED_BLOCK;
  /* Initial mask for a one-instruction block; now_pred_add_mask ()
     refines it as further instructions join the block.  */
  now_pred.mask = 0x18;
  now_pred.cc = cond;
  now_pred.block_length = 1;
  /* An IT instruction implies Thumb state.  */
  mapping_state (MAP_THUMB);
  /* Remember where the IT instruction was emitted so its mask can be
     rewritten in place later.  */
  now_pred.insn = output_it_inst (cond, now_pred.mask, NULL);
  now_pred.warn_deprecated = FALSE;
  now_pred.insn_cond = TRUE;
}
19267
19268 /* Close an automatic IT block.
19269 See comments in new_automatic_it_block (). */
19270
static void
close_automatic_it_block (void)
{
  /* 0x10 is the mask value with no free slots left (cf. the is_last
     checks in handle_pred_state ()).  */
  now_pred.mask = 0x10;
  now_pred.block_length = 0;
}
19277
19278 /* Update the mask of the current automatically-generated IT
19279 instruction. See comments in new_automatic_it_block (). */
19280
static void
now_pred_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
					     | ((bitvalue) << (nbit)))
  /* The low bit of the condition selects whether this slot executes on
     the block condition (then) or its inverse (else).  */
  const int resulting_bit = (cond & 1);

  now_pred.mask &= 0xf;
  /* Record this instruction's then/else bit in its slot...  */
  now_pred.mask = SET_BIT_VALUE (now_pred.mask,
				 resulting_bit,
				 (5 - now_pred.block_length));
  /* ...and move the terminating 1 bit down one position.  */
  now_pred.mask = SET_BIT_VALUE (now_pred.mask,
				 1,
				 ((5 - now_pred.block_length) - 1));
  /* Rewrite the previously-emitted IT instruction with the new mask.  */
  output_it_inst (now_pred.cc, now_pred.mask, now_pred.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
19301
19302 /* The IT blocks handling machinery is accessed through the these functions:
19303 it_fsm_pre_encode () from md_assemble ()
19304 set_pred_insn_type () optional, from the tencode functions
19305 set_pred_insn_type_last () ditto
19306 in_pred_block () ditto
19307 it_fsm_post_encode () from md_assemble ()
19308 force_automatic_it_block_close () from label handling functions
19309
19310 Rationale:
19311 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
19312 initializing the IT insn type with a generic initial value depending
19313 on the inst.condition.
19314 2) During the tencode function, two things may happen:
19315 a) The tencode function overrides the IT insn type by
19316 calling either set_pred_insn_type (type) or
19317 set_pred_insn_type_last ().
19318 b) The tencode function queries the IT block state by
19319 calling in_pred_block () (i.e. to determine narrow/not narrow mode).
19320
19321 Both set_pred_insn_type and in_pred_block run the internal FSM state
19322 handling function (handle_pred_state), because: a) setting the IT insn
19323 type may incur in an invalid state (exiting the function),
19324 and b) querying the state requires the FSM to be updated.
19325 Specifically we want to avoid creating an IT block for conditional
19326 branches, so it_fsm_pre_encode is actually a guess and we can't
19327 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
19329 Because of this, if set_pred_insn_type and in_pred_block have to be
19330 used, set_pred_insn_type has to be called first.
19331
19332 set_pred_insn_type_last () is a wrapper of set_pred_insn_type (type),
19333 that determines the insn IT type depending on the inst.cond code.
19334 When a tencode () routine encodes an instruction that can be
19335 either outside an IT block, or, in the case of being inside, has to be
19336 the last one, set_pred_insn_type_last () will determine the proper
19337 IT instruction type based on the inst.cond code. Otherwise,
19338 set_pred_insn_type can be called for overriding that logic or
19339 for covering other cases.
19340
19341 Calling handle_pred_state () may not transition the IT block state to
19342 OUTSIDE_PRED_BLOCK immediately, since the (current) state could be
19343 still queried. Instead, if the FSM determines that the state should
19344 be transitioned to OUTSIDE_PRED_BLOCK, a flag is marked to be closed
19345 after the tencode () function: that's what it_fsm_post_encode () does.
19346
19347 Since in_pred_block () calls the state handling function to get an
19348 updated state, an error may occur (due to invalid insns combination).
19349 In that case, inst.error is set.
19350 Therefore, inst.error has to be checked after the execution of
19351 the tencode () routine.
19352
19353 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
19354 any pending state change (if any) that didn't take place in
19355 handle_pred_state () as explained above. */
19356
19357 static void
19358 it_fsm_pre_encode (void)
19359 {
19360 if (inst.cond != COND_ALWAYS)
19361 inst.pred_insn_type = INSIDE_IT_INSN;
19362 else
19363 inst.pred_insn_type = OUTSIDE_PRED_INSN;
19364
19365 now_pred.state_handled = 0;
19366 }
19367
19368 /* IT state FSM handling function. */
19369 /* MVE instructions and non-MVE instructions are handled differently because of
19370 the introduction of VPT blocks.
19371 Specifications say that any non-MVE instruction inside a VPT block is
19372 UNPREDICTABLE, with the exception of the BKPT instruction. Whereas most MVE
19373 instructions are deemed to be UNPREDICTABLE if inside an IT block. For the
19374 few exceptions this will be handled at their respective handler functions.
19375 The error messages provided depending on the different combinations possible
19376 are described in the cases below:
19377 For 'most' MVE instructions:
19378 1) In an IT block, with an IT code: syntax error
19379 2) In an IT block, with a VPT code: error: must be in a VPT block
19380 3) In an IT block, with no code: warning: UNPREDICTABLE
19381 4) In a VPT block, with an IT code: syntax error
19382 5) In a VPT block, with a VPT code: OK!
19383 6) In a VPT block, with no code: error: missing code
19384 7) Outside a pred block, with an IT code: error: syntax error
19385 8) Outside a pred block, with a VPT code: error: should be in a VPT block
19386 9) Outside a pred block, with no code: OK!
19387 For non-MVE instructions:
19388 10) In an IT block, with an IT code: OK!
19389 11) In an IT block, with a VPT code: syntax error
19390 12) In an IT block, with no code: error: missing code
19391 13) In a VPT block, with an IT code: error: should be in an IT block
19392 14) In a VPT block, with a VPT code: syntax error
19393 15) In a VPT block, with no code: UNPREDICTABLE
19394 16) Outside a pred block, with an IT code: error: should be in an IT block
19395 17) Outside a pred block, with a VPT code: syntax error
19396 18) Outside a pred block, with no code: OK!
19397 */
19398
19399
19400 static int
19401 handle_pred_state (void)
19402 {
19403 now_pred.state_handled = 1;
19404 now_pred.insn_cond = FALSE;
19405
19406 switch (now_pred.state)
19407 {
19408 case OUTSIDE_PRED_BLOCK:
19409 switch (inst.pred_insn_type)
19410 {
19411 case MVE_OUTSIDE_PRED_INSN:
19412 if (inst.cond < COND_ALWAYS)
19413 {
19414 /* Case 7: Outside a pred block, with an IT code: error: syntax
19415 error. */
19416 inst.error = BAD_SYNTAX;
19417 return FAIL;
19418 }
19419 /* Case 9: Outside a pred block, with no code: OK! */
19420 break;
19421 case OUTSIDE_PRED_INSN:
19422 if (inst.cond > COND_ALWAYS)
19423 {
19424 /* Case 17: Outside a pred block, with a VPT code: syntax error.
19425 */
19426 inst.error = BAD_SYNTAX;
19427 return FAIL;
19428 }
19429 /* Case 18: Outside a pred block, with no code: OK! */
19430 break;
19431
19432 case INSIDE_VPT_INSN:
19433 /* Case 8: Outside a pred block, with a VPT code: error: should be in
19434 a VPT block. */
19435 inst.error = BAD_OUT_VPT;
19436 return FAIL;
19437
19438 case INSIDE_IT_INSN:
19439 case INSIDE_IT_LAST_INSN:
19440 if (inst.cond < COND_ALWAYS)
19441 {
19442 /* Case 16: Outside a pred block, with an IT code: error: should
19443 be in an IT block. */
19444 if (thumb_mode == 0)
19445 {
19446 if (unified_syntax
19447 && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
19448 as_tsktsk (_("Warning: conditional outside an IT block"\
19449 " for Thumb."));
19450 }
19451 else
19452 {
19453 if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
19454 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
19455 {
19456 /* Automatically generate the IT instruction. */
19457 new_automatic_it_block (inst.cond);
19458 if (inst.pred_insn_type == INSIDE_IT_LAST_INSN)
19459 close_automatic_it_block ();
19460 }
19461 else
19462 {
19463 inst.error = BAD_OUT_IT;
19464 return FAIL;
19465 }
19466 }
19467 break;
19468 }
19469 else if (inst.cond > COND_ALWAYS)
19470 {
19471 /* Case 17: Outside a pred block, with a VPT code: syntax error.
19472 */
19473 inst.error = BAD_SYNTAX;
19474 return FAIL;
19475 }
19476 else
19477 gas_assert (0);
19478 case IF_INSIDE_IT_LAST_INSN:
19479 case NEUTRAL_IT_INSN:
19480 break;
19481
19482 case VPT_INSN:
19483 if (inst.cond != COND_ALWAYS)
19484 first_error (BAD_SYNTAX);
19485 now_pred.state = MANUAL_PRED_BLOCK;
19486 now_pred.block_length = 0;
19487 now_pred.type = VECTOR_PRED;
19488 now_pred.cc = 0;
19489 break;
19490 case IT_INSN:
19491 now_pred.state = MANUAL_PRED_BLOCK;
19492 now_pred.block_length = 0;
19493 now_pred.type = SCALAR_PRED;
19494 break;
19495 }
19496 break;
19497
19498 case AUTOMATIC_PRED_BLOCK:
19499 /* Three things may happen now:
19500 a) We should increment current it block size;
19501 b) We should close current it block (closing insn or 4 insns);
19502 c) We should close current it block and start a new one (due
19503 to incompatible conditions or
19504 4 insns-length block reached). */
19505
19506 switch (inst.pred_insn_type)
19507 {
19508 case INSIDE_VPT_INSN:
19509 case VPT_INSN:
19510 case MVE_OUTSIDE_PRED_INSN:
19511 gas_assert (0);
19512 case OUTSIDE_PRED_INSN:
19513 /* The closure of the block shall happen immediately,
19514 so any in_pred_block () call reports the block as closed. */
19515 force_automatic_it_block_close ();
19516 break;
19517
19518 case INSIDE_IT_INSN:
19519 case INSIDE_IT_LAST_INSN:
19520 case IF_INSIDE_IT_LAST_INSN:
19521 now_pred.block_length++;
19522
19523 if (now_pred.block_length > 4
19524 || !now_pred_compatible (inst.cond))
19525 {
19526 force_automatic_it_block_close ();
19527 if (inst.pred_insn_type != IF_INSIDE_IT_LAST_INSN)
19528 new_automatic_it_block (inst.cond);
19529 }
19530 else
19531 {
19532 now_pred.insn_cond = TRUE;
19533 now_pred_add_mask (inst.cond);
19534 }
19535
19536 if (now_pred.state == AUTOMATIC_PRED_BLOCK
19537 && (inst.pred_insn_type == INSIDE_IT_LAST_INSN
19538 || inst.pred_insn_type == IF_INSIDE_IT_LAST_INSN))
19539 close_automatic_it_block ();
19540 break;
19541
19542 case NEUTRAL_IT_INSN:
19543 now_pred.block_length++;
19544 now_pred.insn_cond = TRUE;
19545
19546 if (now_pred.block_length > 4)
19547 force_automatic_it_block_close ();
19548 else
19549 now_pred_add_mask (now_pred.cc & 1);
19550 break;
19551
19552 case IT_INSN:
19553 close_automatic_it_block ();
19554 now_pred.state = MANUAL_PRED_BLOCK;
19555 break;
19556 }
19557 break;
19558
19559 case MANUAL_PRED_BLOCK:
19560 {
19561 int cond, is_last;
19562 if (now_pred.type == SCALAR_PRED)
19563 {
19564 /* Check conditional suffixes. */
19565 cond = now_pred.cc ^ ((now_pred.mask >> 4) & 1) ^ 1;
19566 now_pred.mask <<= 1;
19567 now_pred.mask &= 0x1f;
19568 is_last = (now_pred.mask == 0x10);
19569 }
19570 else
19571 {
19572 now_pred.cc ^= (now_pred.mask >> 4);
19573 cond = now_pred.cc + 0xf;
19574 now_pred.mask <<= 1;
19575 now_pred.mask &= 0x1f;
19576 is_last = now_pred.mask == 0x10;
19577 }
19578 now_pred.insn_cond = TRUE;
19579
19580 switch (inst.pred_insn_type)
19581 {
19582 case OUTSIDE_PRED_INSN:
19583 if (now_pred.type == SCALAR_PRED)
19584 {
19585 if (inst.cond == COND_ALWAYS)
19586 {
19587 /* Case 12: In an IT block, with no code: error: missing
19588 code. */
19589 inst.error = BAD_NOT_IT;
19590 return FAIL;
19591 }
19592 else if (inst.cond > COND_ALWAYS)
19593 {
19594 /* Case 11: In an IT block, with a VPT code: syntax error.
19595 */
19596 inst.error = BAD_SYNTAX;
19597 return FAIL;
19598 }
19599 else if (thumb_mode)
19600 {
19601 /* This is for some special cases where a non-MVE
19602 instruction is not allowed in an IT block, such as cbz,
19603 but are put into one with a condition code.
19604 You could argue this should be a syntax error, but we
19605 gave the 'not allowed in IT block' diagnostic in the
19606 past so we will keep doing so. */
19607 inst.error = BAD_NOT_IT;
19608 return FAIL;
19609 }
19610 break;
19611 }
19612 else
19613 {
19614 /* Case 15: In a VPT block, with no code: UNPREDICTABLE. */
19615 as_tsktsk (MVE_NOT_VPT);
19616 return SUCCESS;
19617 }
19618 case MVE_OUTSIDE_PRED_INSN:
19619 if (now_pred.type == SCALAR_PRED)
19620 {
19621 if (inst.cond == COND_ALWAYS)
19622 {
19623 /* Case 3: In an IT block, with no code: warning:
19624 UNPREDICTABLE. */
19625 as_tsktsk (MVE_NOT_IT);
19626 return SUCCESS;
19627 }
19628 else if (inst.cond < COND_ALWAYS)
19629 {
19630 /* Case 1: In an IT block, with an IT code: syntax error.
19631 */
19632 inst.error = BAD_SYNTAX;
19633 return FAIL;
19634 }
19635 else
19636 gas_assert (0);
19637 }
19638 else
19639 {
19640 if (inst.cond < COND_ALWAYS)
19641 {
19642 /* Case 4: In a VPT block, with an IT code: syntax error.
19643 */
19644 inst.error = BAD_SYNTAX;
19645 return FAIL;
19646 }
19647 else if (inst.cond == COND_ALWAYS)
19648 {
19649 /* Case 6: In a VPT block, with no code: error: missing
19650 code. */
19651 inst.error = BAD_NOT_VPT;
19652 return FAIL;
19653 }
19654 else
19655 {
19656 gas_assert (0);
19657 }
19658 }
19659 case INSIDE_IT_INSN:
19660 if (inst.cond > COND_ALWAYS)
19661 {
19662 /* Case 11: In an IT block, with a VPT code: syntax error. */
19663 /* Case 14: In a VPT block, with a VPT code: syntax error. */
19664 inst.error = BAD_SYNTAX;
19665 return FAIL;
19666 }
19667 else if (now_pred.type == SCALAR_PRED)
19668 {
19669 /* Case 10: In an IT block, with an IT code: OK! */
19670 if (cond != inst.cond)
19671 {
19672 inst.error = now_pred.type == SCALAR_PRED ? BAD_IT_COND :
19673 BAD_VPT_COND;
19674 return FAIL;
19675 }
19676 }
19677 else
19678 {
19679 /* Case 13: In a VPT block, with an IT code: error: should be
19680 in an IT block. */
19681 inst.error = BAD_OUT_IT;
19682 return FAIL;
19683 }
19684 break;
19685
19686 case INSIDE_VPT_INSN:
19687 if (now_pred.type == SCALAR_PRED)
19688 {
19689 /* Case 2: In an IT block, with a VPT code: error: must be in a
19690 VPT block. */
19691 inst.error = BAD_OUT_VPT;
19692 return FAIL;
19693 }
19694 /* Case 5: In a VPT block, with a VPT code: OK! */
19695 else if (cond != inst.cond)
19696 {
19697 inst.error = BAD_VPT_COND;
19698 return FAIL;
19699 }
19700 break;
19701 case INSIDE_IT_LAST_INSN:
19702 case IF_INSIDE_IT_LAST_INSN:
19703 if (now_pred.type == VECTOR_PRED || inst.cond > COND_ALWAYS)
19704 {
19705 /* Case 4: In a VPT block, with an IT code: syntax error. */
19706 /* Case 11: In an IT block, with a VPT code: syntax error. */
19707 inst.error = BAD_SYNTAX;
19708 return FAIL;
19709 }
19710 else if (cond != inst.cond)
19711 {
19712 inst.error = BAD_IT_COND;
19713 return FAIL;
19714 }
19715 if (!is_last)
19716 {
19717 inst.error = BAD_BRANCH;
19718 return FAIL;
19719 }
19720 break;
19721
19722 case NEUTRAL_IT_INSN:
19723 /* The BKPT instruction is unconditional even in a IT or VPT
19724 block. */
19725 break;
19726
19727 case IT_INSN:
19728 if (now_pred.type == SCALAR_PRED)
19729 {
19730 inst.error = BAD_IT_IT;
19731 return FAIL;
19732 }
19733 /* fall through. */
19734 case VPT_INSN:
19735 if (inst.cond == COND_ALWAYS)
19736 {
19737 /* Executing a VPT/VPST instruction inside an IT block or a
19738 VPT/VPST/IT instruction inside a VPT block is UNPREDICTABLE.
19739 */
19740 if (now_pred.type == SCALAR_PRED)
19741 as_tsktsk (MVE_NOT_IT);
19742 else
19743 as_tsktsk (MVE_NOT_VPT);
19744 return SUCCESS;
19745 }
19746 else
19747 {
19748 /* VPT/VPST do not accept condition codes. */
19749 inst.error = BAD_SYNTAX;
19750 return FAIL;
19751 }
19752 }
19753 }
19754 break;
19755 }
19756
19757 return SUCCESS;
19758 }
19759
/* Pattern/mask pair describing a class of 16-bit Thumb encodings that is
   performance deprecated inside an IT block on ARMv8; matched by
   it_fsm_post_encode against the encoded instruction.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Required opcode bits (already masked).  */
  unsigned long mask;		/* Which opcode bits participate in the match.  */
  const char* description;	/* Translatable class name used in the diagnostic.  */
};
19766
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Scanned linearly by it_fsm_post_encode; the scan stops at the
   terminating entry with a zero mask.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode.  'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
19781
19782 static void
19783 it_fsm_post_encode (void)
19784 {
19785 int is_last;
19786
19787 if (!now_pred.state_handled)
19788 handle_pred_state ();
19789
19790 if (now_pred.insn_cond
19791 && !now_pred.warn_deprecated
19792 && warn_on_deprecated
19793 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
19794 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
19795 {
19796 if (inst.instruction >= 0x10000)
19797 {
19798 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
19799 "performance deprecated in ARMv8-A and ARMv8-R"));
19800 now_pred.warn_deprecated = TRUE;
19801 }
19802 else
19803 {
19804 const struct depr_insn_mask *p = depr_it_insns;
19805
19806 while (p->mask != 0)
19807 {
19808 if ((inst.instruction & p->mask) == p->pattern)
19809 {
19810 as_tsktsk (_("IT blocks containing 16-bit Thumb "
19811 "instructions of the following class are "
19812 "performance deprecated in ARMv8-A and "
19813 "ARMv8-R: %s"), p->description);
19814 now_pred.warn_deprecated = TRUE;
19815 break;
19816 }
19817
19818 ++p;
19819 }
19820 }
19821
19822 if (now_pred.block_length > 1)
19823 {
19824 as_tsktsk (_("IT blocks containing more than one conditional "
19825 "instruction are performance deprecated in ARMv8-A and "
19826 "ARMv8-R"));
19827 now_pred.warn_deprecated = TRUE;
19828 }
19829 }
19830
19831 is_last = (now_pred.mask == 0x10);
19832 if (is_last)
19833 {
19834 now_pred.state = OUTSIDE_PRED_BLOCK;
19835 now_pred.mask = 0;
19836 }
19837 }
19838
19839 static void
19840 force_automatic_it_block_close (void)
19841 {
19842 if (now_pred.state == AUTOMATIC_PRED_BLOCK)
19843 {
19844 close_automatic_it_block ();
19845 now_pred.state = OUTSIDE_PRED_BLOCK;
19846 now_pred.mask = 0;
19847 }
19848 }
19849
19850 static int
19851 in_pred_block (void)
19852 {
19853 if (!now_pred.state_handled)
19854 handle_pred_state ();
19855
19856 return now_pred.state != OUTSIDE_PRED_BLOCK;
19857 }
19858
19859 /* Whether OPCODE only has T32 encoding. Since this function is only used by
19860 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
19861 here, hence the "known" in the function name. */
19862
19863 static bfd_boolean
19864 known_t32_only_insn (const struct asm_opcode *opcode)
19865 {
19866 /* Original Thumb-1 wide instruction. */
19867 if (opcode->tencode == do_t_blx
19868 || opcode->tencode == do_t_branch23
19869 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
19870 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
19871 return TRUE;
19872
19873 /* Wide-only instruction added to ARMv8-M Baseline. */
19874 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
19875 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
19876 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
19877 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
19878 return TRUE;
19879
19880 return FALSE;
19881 }
19882
19883 /* Whether wide instruction variant can be used if available for a valid OPCODE
19884 in ARCH. */
19885
19886 static bfd_boolean
19887 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
19888 {
19889 if (known_t32_only_insn (opcode))
19890 return TRUE;
19891
19892 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
19893 of variant T3 of B.W is checked in do_t_branch. */
19894 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
19895 && opcode->tencode == do_t_branch)
19896 return TRUE;
19897
19898 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
19899 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
19900 && opcode->tencode == do_t_mov_cmp
19901 /* Make sure CMP instruction is not affected. */
19902 && opcode->aencode == do_mov)
19903 return TRUE;
19904
19905 /* Wide instruction variants of all instructions with narrow *and* wide
19906 variants become available with ARMv6t2. Other opcodes are either
19907 narrow-only or wide-only and are thus available if OPCODE is valid. */
19908 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
19909 return TRUE;
19910
19911 /* OPCODE with narrow only instruction variant or wide variant not
19912 available. */
19913 return FALSE;
19914 }
19915
/* GAS target hook: assemble the single instruction held in STR.
   Attaches any pending label, looks the mnemonic up, dispatches to the
   Thumb or ARM encoder (driving the IT/VPT state machine around the
   encode step), records the architecture features used, and finally
   emits the instruction via output_inst.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Reset the per-instruction scratch state and mark every relocation
     slot unused.  */
  memset (&inst, '\0', sizeof (inst));
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    inst.relocs[r].type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the pred_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* Determine the instruction size from the encoded value:
	     anything above 0xffff is a 32-bit Thumb instruction.
	     Values in 0xe800-0xffff would be the first halfword of a
	     32-bit encoding, hence the assertion.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      /* Fill in the condition field (or 0xF for unconditional forms).  */
      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
20110
/* Warn at end of assembly about any manually-opened IT or VPT/VPST
   block that was never closed: per-section for ELF, for the whole file
   otherwise.  */
static void
check_pred_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_pred.state
	== MANUAL_PRED_BLOCK)
      {
	/* NOTE(review): the message wording is selected from the global
	   now_pred.type rather than the per-section predication state
	   tested just above -- confirm this is intentional.  */
	if (now_pred.type == SCALAR_PRED)
	  as_warn (_("section '%s' finished with an open IT block."),
		   sect->name);
	else
	  as_warn (_("section '%s' finished with an open VPT/VPST block."),
		   sect->name);
      }
#else
  if (now_pred.state == MANUAL_PRED_BLOCK)
    {
      if (now_pred.type == SCALAR_PRED)
	as_warn (_("file finished with an open IT block."));
      else
	as_warn (_("file finished with an open VPT/VPST block."));
    }
#endif
}
20138
20139 /* Various frobbings of labels and their addresses. */
20140
/* Hook run at the start of each input line: forget the previously seen
   label so that md_assemble and arm_frob_label only act on labels
   defined on the current line.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
20146
/* Hook run whenever label SYM is defined.  Records it for md_assemble's
   alignment fixup, tags it with the current Thumb/ARM and interwork
   state, closes any automatic IT block, optionally marks it as a Thumb
   function, and forwards it to the DWARF line-number machinery.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
20205
20206 bfd_boolean
20207 arm_data_in_code (void)
20208 {
20209 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
20210 {
20211 *input_line_pointer = '/';
20212 input_line_pointer += 5;
20213 *input_line_pointer = 0;
20214 return TRUE;
20215 }
20216
20217 return FALSE;
20218 }
20219
20220 char *
20221 arm_canonicalize_symbol_name (char * name)
20222 {
20223 int len;
20224
20225 if (thumb_mode && (len = strlen (name)) > 5
20226 && streq (name + len - 5, "/data"))
20227 *(name + len - 5) = 0;
20228
20229 return name;
20230 }
20231 \f
20232 /* Table of all register names defined by default. The user can
20233 define additional names with .req. Note that all register names
20234 should appear in both upper and lowercase variants. Some registers
20235 also have mixed-case names. */
20236
/* One reg_entry: name S, encoded number N, type REG_TYPE_##T.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* Register P##N with number N (e.g. r0..r15).  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* Register P##N whose encoded number is 2*N -- used below for Neon Q
   registers; presumably so each Q register indexes its D-register pair,
   confirm against the Neon operand parsing.  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* Sixteen registers P0..P15.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* The high half P16..P31 (extra VFP/Neon registers).  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* Sixteen registers P0..P15 with doubled encoded numbers.  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* LR, SP and SPSR entries for one banked mode (both cases emitted by
   the callers below).  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
20262
/* Banked-register entries below carry an encoded value of
   512|(n<<16) or 768|(n<<16), optionally with SPSR_BIT set --
   presumably decoded by the MRS/MSR banked-register operand handling;
   confirm against the operand parser before relying on the layout.  */
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.  */
  REGSET(s,VFS), REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.  */
  REGSET(d,VFD), REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ), REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
  REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
  REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.  */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
  REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
  REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
20378 #undef REGDEF
20379 #undef REGNUM
20380 #undef REGSET
20381
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Every ordering of each 1- to 4-letter
   combination of the f/s/x/c flag letters is listed explicitly, so any
   permutation the user writes is accepted.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.  */
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
20460
20461 /* Table of V7M psr names. */
20462 static const struct asm_psr v7m_psrs[] =
20463 {
20464 {"apsr", 0x0 }, {"APSR", 0x0 },
20465 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
20466 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
20467 {"psr", 0x3 }, {"PSR", 0x3 },
20468 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
20469 {"ipsr", 0x5 }, {"IPSR", 0x5 },
20470 {"epsr", 0x6 }, {"EPSR", 0x6 },
20471 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
20472 {"msp", 0x8 }, {"MSP", 0x8 },
20473 {"psp", 0x9 }, {"PSP", 0x9 },
20474 {"msplim", 0xa }, {"MSPLIM", 0xa },
20475 {"psplim", 0xb }, {"PSPLIM", 0xb },
20476 {"primask", 0x10}, {"PRIMASK", 0x10},
20477 {"basepri", 0x11}, {"BASEPRI", 0x11},
20478 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
20479 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
20480 {"control", 0x14}, {"CONTROL", 0x14},
20481 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
20482 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
20483 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
20484 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
20485 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
20486 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
20487 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
20488 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
20489 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
20490 };
20491
/* Table of all shift-in-operand names, in both cases.  "asl" is kept as
   a synonym for "lsl".  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
20502
/* Table of all explicit relocation names (the "(got)", "(plt)", ...
   operand modifiers).  Each name maps to the BFD relocation code to
   emit.  Every entry appears in both lower- and upper-case spellings,
   since lookup through this table is case-sensitive.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32},	{ "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32},	{ "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC },
	{ "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC },
  { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC },
	{ "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "funcdesc", BFD_RELOC_ARM_FUNCDESC },
	{ "FUNCDESC", BFD_RELOC_ARM_FUNCDESC },
   { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC },      { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC },
   { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC },    { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
   /* Fixed typo: was "GOTTPOFF_FDIC", which made the documented
      upper-case spelling of this modifier unrecognizable.  */
   { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC },   { "GOTTPOFF_FDPIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
};
#endif
20536
/* Table of all conditional affixes.  Maps each condition-code suffix to
   its 4-bit encoding.  Synonym spellings share one value: "cs"/"hs" are
   both 0x2, and "cc"/"ul"/"lo" are all 0x3.  0xf is not listed here:
   that encoding is reserved for unconditional/special forms.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
/* Pseudo condition codes for the "t"/"e" vector-predication affixes
   (added with MVE support).  Their values deliberately lie outside the
   4-bit space used by the conds[] table above so they cannot collide
   with a real condition code.  NOTE(review): exact consumer semantics
   inferred from the MVE patch context -- confirm against the users of
   this table.  */
static const struct asm_cond vconds[] =
{
    {"t", 0xf},
    {"e", 0x10}
};
20561
/* Helper: emit two table entries -- the lower-case (L) and upper-case (U)
   spelling of one barrier option -- both with encoding CODE and gated on
   architecture feature FEAT.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Table of all barrier option names and their 4-bit encodings.  Most
   options require only ARM_EXT_BARRIER; the load-only variants ("ld",
   "ishld", "nshld", "oshld") were introduced with ARMv8 and so are
   gated on ARM_EXT_V8 instead.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
20587
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.
   Each argument is an operand-type name; the OP_ prefix is pasted
   on here, so callers write e.g. OPS2(RR, oRR).  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
20612
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  Each expands to one asm_opcode
   initializer: mnemonic, operand list, tag (how a condition may be
   written), ARM opcode, Thumb opcode, ARM/Thumb architecture variants,
   ARM/Thumb encoding functions, and the MVE-predicable flag.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en, 0 }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
20664
/* ARM-only variants of all the above (Thumb opcode/variant/encoder are
   all zero/NULL).  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Thumb-only variants of TCE and TUE.  */
#define ToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

#define ToU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToC.  */
#define toC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToU.  */
#define toU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  The
   Thumb opcode is the ARM opcode with the condition field set to 0xE
   (hence the pasted 0xe##op).  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Helper for CM: glue mnemonic prefix m1, condition infix m2 and
   mnemonic tail m3 into one name.  sizeof (#m2) == 1 detects an empty
   infix (just the NUL terminator), which gives the unconditional form;
   otherwise the infix position is the length of m1.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Expand to one table entry per condition spelling (plus the bare,
   unconditional form) for a mnemonic whose condition infix sits at an
   odd position.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

/* ARM-only, unconditional (condition field 0xE).  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* ARM-only, unconditional with 0xF in the condition field.  */
#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
20745
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  The extra TAG and MVE_P arguments let the wrappers below
   select the condition-tag and mark MVE-predicable entries.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag, mve_p)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag, mve_p)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* Like nCEF, but the opcode is an M_MNEM_xyz (MVE) enumerator and the
   entry is always marked MVE-predicable.  */
#define mCEF(mnem, op, nops, ops, enc)				\
  { #mnem, OPS##nops ops, OT_csuffixF, M_MNEM##op, M_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }


/* nCEF but for MVE predicated instructions.  */
#define mnCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)

/* nCE but for MVE predicated instructions.  */
#define mnCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NUF but for potentially MVE predicated instructions.  */
#define MNUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* nUF but for potentially MVE predicated instructions.  */
#define mnUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* ToC but for potentially MVE predicated instructions.  */
#define mToC(mnem, top, nops, ops, te)					\
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
     do_##te, 1 }

/* NCE but for MVE predicated instructions.  */
#define MNCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NCEF but for MVE predicated instructions.  */
#define MNCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)
/* Lets a table entry name "0" as its encoder: do_##0 -> do_0 -> 0 (no
   encoding function).  */
#define do_0 0
20819
20820 static const struct asm_opcode insns[] =
20821 {
20822 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
20823 #define THUMB_VARIANT & arm_ext_v4t
20824 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
20825 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
20826 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
20827 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
20828 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
20829 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
20830 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
20831 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
20832 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
20833 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
20834 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
20835 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
20836 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
20837 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
20838 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
20839 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
20840
20841 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
20842 for setting PSR flag bits. They are obsolete in V6 and do not
20843 have Thumb equivalents. */
20844 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
20845 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
20846 CL("tstp", 110f000, 2, (RR, SH), cmp),
20847 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
20848 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
20849 CL("cmpp", 150f000, 2, (RR, SH), cmp),
20850 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
20851 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
20852 CL("cmnp", 170f000, 2, (RR, SH), cmp),
20853
20854 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
20855 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
20856 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
20857 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
20858
20859 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
20860 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
20861 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
20862 OP_RRnpc),
20863 OP_ADDRGLDR),ldst, t_ldst),
20864 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
20865
20866 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20867 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20868 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20869 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20870 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20871 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20872
20873 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
20874 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
20875
20876 /* Pseudo ops. */
20877 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
20878 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
20879 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
20880 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
20881
20882 /* Thumb-compatibility pseudo ops. */
20883 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
20884 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
20885 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
20886 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
20887 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
20888 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
20889 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
20890 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
20891 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
20892 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
20893 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
20894 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
20895
20896 /* These may simplify to neg. */
20897 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
20898 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
20899
20900 #undef THUMB_VARIANT
20901 #define THUMB_VARIANT & arm_ext_os
20902
20903 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
20904 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
20905
20906 #undef THUMB_VARIANT
20907 #define THUMB_VARIANT & arm_ext_v6
20908
20909 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
20910
20911 /* V1 instructions with no Thumb analogue prior to V6T2. */
20912 #undef THUMB_VARIANT
20913 #define THUMB_VARIANT & arm_ext_v6t2
20914
20915 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
20916 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
20917 CL("teqp", 130f000, 2, (RR, SH), cmp),
20918
20919 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
20920 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
20921 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
20922 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
20923
20924 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20925 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20926
20927 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20928 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20929
20930 /* V1 instructions with no Thumb analogue at all. */
20931 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
20932 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
20933
20934 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
20935 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
20936 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
20937 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
20938 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
20939 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
20940 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
20941 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
20942
20943 #undef ARM_VARIANT
20944 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
20945 #undef THUMB_VARIANT
20946 #define THUMB_VARIANT & arm_ext_v4t
20947
20948 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
20949 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
20950
20951 #undef THUMB_VARIANT
20952 #define THUMB_VARIANT & arm_ext_v6t2
20953
20954 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
20955 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
20956
20957 /* Generic coprocessor instructions. */
20958 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
20959 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20960 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20961 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20962 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20963 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
20964 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
20965
20966 #undef ARM_VARIANT
20967 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
20968
20969 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
20970 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
20971
20972 #undef ARM_VARIANT
20973 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
20974 #undef THUMB_VARIANT
20975 #define THUMB_VARIANT & arm_ext_msr
20976
20977 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
20978 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
20979
20980 #undef ARM_VARIANT
20981 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
20982 #undef THUMB_VARIANT
20983 #define THUMB_VARIANT & arm_ext_v6t2
20984
20985 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
20986 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
20987 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
20988 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
20989 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
20990 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
20991 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
20992 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
20993
20994 #undef ARM_VARIANT
20995 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
20996 #undef THUMB_VARIANT
20997 #define THUMB_VARIANT & arm_ext_v4t
20998
20999 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21000 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21001 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21002 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21003 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21004 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21005
21006 #undef ARM_VARIANT
21007 #define ARM_VARIANT & arm_ext_v4t_5
21008
21009 /* ARM Architecture 4T. */
21010 /* Note: bx (and blx) are required on V5, even if the processor does
21011 not support Thumb. */
21012 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
21013
21014 #undef ARM_VARIANT
21015 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
21016 #undef THUMB_VARIANT
21017 #define THUMB_VARIANT & arm_ext_v5t
21018
21019 /* Note: blx has 2 variants; the .value coded here is for
21020 BLX(2). Only this variant has conditional execution. */
21021 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
21022 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
21023
21024 #undef THUMB_VARIANT
21025 #define THUMB_VARIANT & arm_ext_v6t2
21026
21027 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
21028 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21029 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21030 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21031 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21032 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
21033 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
21034 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
21035
21036 #undef ARM_VARIANT
21037 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
21038 #undef THUMB_VARIANT
21039 #define THUMB_VARIANT & arm_ext_v5exp
21040
21041 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21042 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21043 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21044 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21045
21046 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21047 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21048
21049 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
21050 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
21051 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
21052 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
21053
21054 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21055 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21056 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21057 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21058
21059 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21060 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21061
21062 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
21063 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
21064 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
21065 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
21066
21067 #undef ARM_VARIANT
21068 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
21069 #undef THUMB_VARIANT
21070 #define THUMB_VARIANT & arm_ext_v6t2
21071
21072 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
21073 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
21074 ldrd, t_ldstd),
21075 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
21076 ADDRGLDRS), ldrd, t_ldstd),
21077
21078 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
21079 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
21080
21081 #undef ARM_VARIANT
21082 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
21083
21084 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
21085
21086 #undef ARM_VARIANT
21087 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
21088 #undef THUMB_VARIANT
21089 #define THUMB_VARIANT & arm_ext_v6
21090
21091 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
21092 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
21093 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
21094 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
21095 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
21096 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21097 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21098 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21099 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21100 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
21101
21102 #undef THUMB_VARIANT
21103 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21104
21105 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
21106 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
21107 strex, t_strex),
21108 #undef THUMB_VARIANT
21109 #define THUMB_VARIANT & arm_ext_v6t2
21110
21111 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
21112 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
21113
21114 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
21115 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
21116
21117 /* ARM V6 not included in V7M. */
21118 #undef THUMB_VARIANT
21119 #define THUMB_VARIANT & arm_ext_v6_notm
21120 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
21121 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
21122 UF(rfeib, 9900a00, 1, (RRw), rfe),
21123 UF(rfeda, 8100a00, 1, (RRw), rfe),
21124 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
21125 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
21126 UF(rfefa, 8100a00, 1, (RRw), rfe),
21127 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
21128 UF(rfeed, 9900a00, 1, (RRw), rfe),
21129 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
21130 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
21131 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
21132 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
21133 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
21134 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
21135 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
21136 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
21137 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
21138 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
21139
21140 /* ARM V6 not included in V7M (eg. integer SIMD). */
21141 #undef THUMB_VARIANT
21142 #define THUMB_VARIANT & arm_ext_v6_dsp
21143 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
21144 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
21145 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21146 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21147 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21148 /* Old name for QASX. */
21149 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21150 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21151 /* Old name for QSAX. */
21152 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21153 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21154 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21155 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21156 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21157 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21158 /* Old name for SASX. */
21159 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21160 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21161 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21162 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21163 /* Old name for SHASX. */
21164 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21165 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21166 /* Old name for SHSAX. */
21167 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21168 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21169 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21170 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21171 /* Old name for SSAX. */
21172 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21173 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21174 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21175 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21176 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21177 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21178 /* Old name for UASX. */
21179 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21180 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21181 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21182 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21183 /* Old name for UHASX. */
21184 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21185 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21186 /* Old name for UHSAX. */
21187 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21188 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21189 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21190 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21191 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21192 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21193 /* Old name for UQASX. */
21194 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21195 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21196 /* Old name for UQSAX. */
21197 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21198 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21199 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21200 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21201 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21202 /* Old name for USAX. */
21203 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21204 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21205 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21206 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21207 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21208 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21209 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21210 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21211 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21212 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21213 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21214 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21215 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21216 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
21217 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
21218 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21219 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21220 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
21221 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
21222 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21223 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21224 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21225 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21226 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21227 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21228 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21229 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21230 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21231 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21232 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
21233 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
21234 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21235 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21236 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
21237
21238 #undef ARM_VARIANT
21239 #define ARM_VARIANT & arm_ext_v6k_v6t2
21240 #undef THUMB_VARIANT
21241 #define THUMB_VARIANT & arm_ext_v6k_v6t2
21242
21243 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
21244 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
21245 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
21246 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
21247
21248 #undef THUMB_VARIANT
21249 #define THUMB_VARIANT & arm_ext_v6_notm
21250 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
21251 ldrexd, t_ldrexd),
21252 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
21253 RRnpcb), strexd, t_strexd),
21254
21255 #undef THUMB_VARIANT
21256 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21257 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
21258 rd_rn, rd_rn),
21259 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
21260 rd_rn, rd_rn),
21261 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
21262 strex, t_strexbh),
21263 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
21264 strex, t_strexbh),
21265 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
21266
21267 #undef ARM_VARIANT
21268 #define ARM_VARIANT & arm_ext_sec
21269 #undef THUMB_VARIANT
21270 #define THUMB_VARIANT & arm_ext_sec
21271
21272 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
21273
21274 #undef ARM_VARIANT
21275 #define ARM_VARIANT & arm_ext_virt
21276 #undef THUMB_VARIANT
21277 #define THUMB_VARIANT & arm_ext_virt
21278
21279 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
21280 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
21281
21282 #undef ARM_VARIANT
21283 #define ARM_VARIANT & arm_ext_pan
21284 #undef THUMB_VARIANT
21285 #define THUMB_VARIANT & arm_ext_pan
21286
21287 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
21288
21289 #undef ARM_VARIANT
21290 #define ARM_VARIANT & arm_ext_v6t2
21291 #undef THUMB_VARIANT
21292 #define THUMB_VARIANT & arm_ext_v6t2
21293
21294 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
21295 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
21296 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
21297 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
21298
21299 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
21300 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
21301
21302 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
21303 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
21304 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
21305 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
21306
21307 #undef ARM_VARIANT
21308 #define ARM_VARIANT & arm_ext_v3
21309 #undef THUMB_VARIANT
21310 #define THUMB_VARIANT & arm_ext_v6t2
21311
21312 TUE("csdb", 320f014, f3af8014, 0, (), noargs, t_csdb),
21313 TUF("ssbb", 57ff040, f3bf8f40, 0, (), noargs, t_csdb),
21314 TUF("pssbb", 57ff044, f3bf8f44, 0, (), noargs, t_csdb),
21315
21316 #undef ARM_VARIANT
21317 #define ARM_VARIANT & arm_ext_v6t2
21318 #undef THUMB_VARIANT
21319 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21320 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
21321 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
21322
21323 /* Thumb-only instructions. */
21324 #undef ARM_VARIANT
21325 #define ARM_VARIANT NULL
21326 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
21327 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
21328
21329 /* ARM does not really have an IT instruction, so always allow it.
21330 The opcode is copied from Thumb in order to allow warnings in
21331 -mimplicit-it=[never | arm] modes. */
21332 #undef ARM_VARIANT
21333 #define ARM_VARIANT & arm_ext_v1
21334 #undef THUMB_VARIANT
21335 #define THUMB_VARIANT & arm_ext_v6t2
21336
21337 TUE("it", bf08, bf08, 1, (COND), it, t_it),
21338 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
21339 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
21340 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
21341 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
21342 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
21343 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
21344 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
21345 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
21346 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
21347 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
21348 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
21349 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
21350 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
21351 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
21352 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
21353 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
21354 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
21355
21356 /* Thumb2 only instructions. */
21357 #undef ARM_VARIANT
21358 #define ARM_VARIANT NULL
21359
21360 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
21361 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
21362 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
21363 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
21364 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
21365 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
21366
21367 /* Hardware division instructions. */
21368 #undef ARM_VARIANT
21369 #define ARM_VARIANT & arm_ext_adiv
21370 #undef THUMB_VARIANT
21371 #define THUMB_VARIANT & arm_ext_div
21372
21373 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
21374 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
21375
21376 /* ARM V6M/V7 instructions. */
21377 #undef ARM_VARIANT
21378 #define ARM_VARIANT & arm_ext_barrier
21379 #undef THUMB_VARIANT
21380 #define THUMB_VARIANT & arm_ext_barrier
21381
21382 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
21383 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
21384 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
21385
21386 /* ARM V7 instructions. */
21387 #undef ARM_VARIANT
21388 #define ARM_VARIANT & arm_ext_v7
21389 #undef THUMB_VARIANT
21390 #define THUMB_VARIANT & arm_ext_v7
21391
21392 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
21393 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
21394
21395 #undef ARM_VARIANT
21396 #define ARM_VARIANT & arm_ext_mp
21397 #undef THUMB_VARIANT
21398 #define THUMB_VARIANT & arm_ext_mp
21399
21400 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
21401
21402  /* ARMv8 instructions.  */
21403 #undef ARM_VARIANT
21404 #define ARM_VARIANT & arm_ext_v8
21405
21406 /* Instructions shared between armv8-a and armv8-m. */
21407 #undef THUMB_VARIANT
21408 #define THUMB_VARIANT & arm_ext_atomics
21409
21410 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21411 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21412 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21413 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
21414 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
21415 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
21416 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21417 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
21418 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21419 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
21420 stlex, t_stlex),
21421 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
21422 stlex, t_stlex),
21423 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
21424 stlex, t_stlex),
21425 #undef THUMB_VARIANT
21426 #define THUMB_VARIANT & arm_ext_v8
21427
21428 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
21429 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
21430 ldrexd, t_ldrexd),
21431 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
21432 strexd, t_strexd),
21433
21434 /* Defined in V8 but is in undefined encoding space for earlier
21435 architectures. However earlier architectures are required to treat
21436    this instruction as a semihosting trap as well.  Hence while not explicitly
21437 defined as such, it is in fact correct to define the instruction for all
21438 architectures. */
21439 #undef THUMB_VARIANT
21440 #define THUMB_VARIANT & arm_ext_v1
21441 #undef ARM_VARIANT
21442 #define ARM_VARIANT & arm_ext_v1
21443 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
21444
21445 /* ARMv8 T32 only. */
21446 #undef ARM_VARIANT
21447 #define ARM_VARIANT NULL
21448 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
21449 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
21450 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
21451
21452 /* FP for ARMv8. */
21453 #undef ARM_VARIANT
21454 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
21455 #undef THUMB_VARIANT
21456 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
21457
21458 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
21459 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
21460 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
21461 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
21462 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
21463 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
21464 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
21465 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
21466 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
21467 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
21468 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
21469 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
21470 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
21471 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
21472 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
21473 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
21474 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
21475
21476 /* Crypto v1 extensions. */
21477 #undef ARM_VARIANT
21478 #define ARM_VARIANT & fpu_crypto_ext_armv8
21479 #undef THUMB_VARIANT
21480 #define THUMB_VARIANT & fpu_crypto_ext_armv8
21481
21482 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
21483 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
21484 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
21485 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
21486 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
21487 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
21488 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
21489 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
21490 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
21491 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
21492 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
21493 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
21494 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
21495 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
21496
21497 #undef ARM_VARIANT
21498 #define ARM_VARIANT & crc_ext_armv8
21499 #undef THUMB_VARIANT
21500 #define THUMB_VARIANT & crc_ext_armv8
21501 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
21502 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
21503 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
21504 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
21505 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
21506 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
21507
21508 /* ARMv8.2 RAS extension. */
21509 #undef ARM_VARIANT
21510 #define ARM_VARIANT & arm_ext_ras
21511 #undef THUMB_VARIANT
21512 #define THUMB_VARIANT & arm_ext_ras
21513 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
21514
21515 #undef ARM_VARIANT
21516 #define ARM_VARIANT & arm_ext_v8_3
21517 #undef THUMB_VARIANT
21518 #define THUMB_VARIANT & arm_ext_v8_3
21519 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
21520 NUF (vcmla, 0, 4, (RNDQ, RNDQ, RNDQ_RNSC, EXPi), vcmla),
21521 NUF (vcadd, 0, 4, (RNDQ, RNDQ, RNDQ, EXPi), vcadd),
21522
21523 #undef ARM_VARIANT
21524 #define ARM_VARIANT & fpu_neon_ext_dotprod
21525 #undef THUMB_VARIANT
21526 #define THUMB_VARIANT & fpu_neon_ext_dotprod
21527 NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
21528 NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
21529
21530 #undef ARM_VARIANT
21531 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
21532 #undef THUMB_VARIANT
21533 #define THUMB_VARIANT NULL
21534
21535 cCE("wfs", e200110, 1, (RR), rd),
21536 cCE("rfs", e300110, 1, (RR), rd),
21537 cCE("wfc", e400110, 1, (RR), rd),
21538 cCE("rfc", e500110, 1, (RR), rd),
21539
21540 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
21541 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
21542 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
21543 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
21544
21545 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
21546 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
21547 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
21548 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
21549
21550 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
21551 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
21552 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
21553 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
21554 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
21555 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
21556 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
21557 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
21558 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
21559 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
21560 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
21561 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
21562
21563 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
21564 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
21565 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
21566 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
21567 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
21568 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
21569 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
21570 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
21571 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
21572 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
21573 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
21574 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
21575
21576 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
21577 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
21578 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
21579 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
21580 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
21581 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
21582 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
21583 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
21584 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
21585 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
21586 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
21587 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
21588
21589 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
21590 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
21591 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
21592 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
21593 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
21594 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
21595 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
21596 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
21597 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
21598 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
21599 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
21600 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
21601
21602 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
21603 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
21604 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
21605 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
21606 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
21607 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
21608 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
21609 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
21610 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
21611 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
21612 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
21613 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
21614
21615 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
21616 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
21617 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
21618 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
21619 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
21620 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
21621 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
21622 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
21623 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
21624 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
21625 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
21626 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
21627
21628 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
21629 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
21630 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
21631 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
21632 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
21633 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
21634 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
21635 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
21636 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
21637 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
21638 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
21639 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
21640
21641  cCL("exps",	e708100, 2, (RF, RF_IF),     rd_rm),
21642  cCL("expsp",	e708120, 2, (RF, RF_IF),     rd_rm),
21643  cCL("expsm",	e708140, 2, (RF, RF_IF),     rd_rm),
21644  cCL("expsz",	e708160, 2, (RF, RF_IF),     rd_rm),
21645  cCL("expd",	e708180, 2, (RF, RF_IF),     rd_rm),
21646  cCL("expdp",	e7081a0, 2, (RF, RF_IF),     rd_rm),
21647  cCL("expdm",	e7081c0, 2, (RF, RF_IF),     rd_rm),
21648  cCL("expdz",	e7081e0, 2, (RF, RF_IF),     rd_rm),
21649  cCL("expe",	e788100, 2, (RF, RF_IF),     rd_rm),
21650  cCL("expep",	e788120, 2, (RF, RF_IF),     rd_rm),
21651  cCL("expem",	e788140, 2, (RF, RF_IF),     rd_rm),
	/* Was a duplicate "expdz" entry; opcode e788160 is the extended-precision
	   round-toward-zero form, so the mnemonic is "expez" — matching the
	   s/d/e x {"",p,m,z} suffix pattern of every other FPA monadic group.  */
21652  cCL("expez",	e788160, 2, (RF, RF_IF),     rd_rm),
21653
21654 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
21655 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
21656 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
21657 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
21658 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
21659 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
21660 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
21661 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
21662 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
21663 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
21664 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
21665 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
21666
21667 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
21668 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
21669 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
21670 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
21671 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
21672 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
21673 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
21674 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
21675 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
21676 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
21677 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
21678 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
21679
21680 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
21681 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
21682 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
21683 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
21684 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
21685 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
21686 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
21687 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
21688 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
21689 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
21690 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
21691 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
21692
21693 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
21694 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
21695 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
21696 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
21697 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
21698 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
21699 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
21700 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
21701 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
21702 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
21703 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
21704 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
21705
21706 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
21707 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
21708 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
21709 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
21710 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
21711 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
21712 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
21713 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
21714 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
21715 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
21716 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
21717 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
21718
21719 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
21720 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
21721 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
21722 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
21723 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
21724 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
21725 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
21726 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
21727 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
21728 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
21729 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
21730 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
21731
21732 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
21733 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
21734 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
21735 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
21736 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
21737 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
21738 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
21739 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
21740 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
21741 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
21742 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
21743 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
21744
21745 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
21746 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
21747 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
21748 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
21749 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
21750 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
21751 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
21752 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
21753 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
21754 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
21755 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
21756 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
21757
21758 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
21759 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
21760 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
21761 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
21762 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
21763 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21764 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21765 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21766 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
21767 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
21768 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
21769 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
21770
21771 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
21772 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
21773 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
21774 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
21775 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
21776 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21777 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21778 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21779 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
21780 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
21781 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
21782 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
21783
21784 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
21785 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
21786 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
21787 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
21788 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
21789 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21790 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21791 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21792 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
21793 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
21794 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
21795 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
21796
21797 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
21798 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
21799 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
21800 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
21801 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
21802 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21803 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21804 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21805 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
21806 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
21807 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
21808 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
21809
21810 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
21811 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
21812 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
21813 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
21814 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
21815 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21816 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21817 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21818 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
21819 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
21820 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
21821 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
21822
21823 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
21824 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
21825 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
21826 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
21827 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
21828 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21829 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21830 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21831 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
21832 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
21833 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
21834 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
21835
21836 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
21837 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
21838 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
21839 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
21840 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
21841 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21842 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21843 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21844 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
21845 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
21846 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
21847 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
21848
21849 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
21850 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
21851 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
21852 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
21853 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
21854 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21855 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21856 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21857 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
21858 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
21859 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
21860 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
21861
21862 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
21863 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
21864 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
21865 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
21866 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
21867 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21868 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21869 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21870 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
21871 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
21872 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
21873 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
21874
21875 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
21876 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
21877 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
21878 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
21879 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
21880 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21881 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21882 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21883 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
21884 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
21885 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
21886 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
21887
21888 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
21889 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
21890 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
21891 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
21892 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
21893 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21894 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21895 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21896 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
21897 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
21898 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
21899 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
21900
21901 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
21902 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
21903 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
21904 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
21905 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
21906 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21907 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21908 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21909 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
21910 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
21911 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
21912 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
21913
21914 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
21915 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
21916 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
21917 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
21918 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
21919 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21920 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21921 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21922 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
21923 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
21924 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
21925 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
21926
21927 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
21928 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
21929 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
21930 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
21931
21932 cCL("flts", e000110, 2, (RF, RR), rn_rd),
21933 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
21934 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
21935 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
21936 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
21937 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
21938 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
21939 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
21940 cCL("flte", e080110, 2, (RF, RR), rn_rd),
21941 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
21942 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
21943 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
21944
21945 /* The implementation of the FIX instruction is broken on some
21946 assemblers, in that it accepts a precision specifier as well as a
21947 rounding specifier, despite the fact that this is meaningless.
21948 To be more compatible, we accept it as well, though of course it
21949 does not set any bits. */
21950 cCE("fix", e100110, 2, (RR, RF), rd_rm),
21951 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
21952 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
21953 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
21954 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
21955 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
21956 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
21957 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
21958 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
21959 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
21960 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
21961 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
21962 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
21963
21964 /* Instructions that were new with the real FPA, call them V2. */
21965 #undef ARM_VARIANT
21966 #define ARM_VARIANT & fpu_fpa_ext_v2
21967
21968 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21969 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21970 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21971 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21972 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21973 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21974
21975 #undef ARM_VARIANT
21976 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
21977
21978 /* Moves and type conversions. */
21979 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
21980 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
21981 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
21982 cCE("fmstat", ef1fa10, 0, (), noargs),
21983 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
21984 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
21985 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
21986 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
21987 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
21988 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
21989 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
21990 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
21991 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
21992 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
21993
21994 /* Memory operations. */
21995 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
21996 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
21997 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
21998 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
21999 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
22000 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
22001 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
22002 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
22003 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
22004 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
22005 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
22006 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
22007 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
22008 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
22009 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
22010 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
22011 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
22012 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
22013
22014 /* Monadic operations. */
22015 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
22016 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
22017 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
22018
22019 /* Dyadic operations. */
22020 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22021 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22022 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22023 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22024 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22025 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22026 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22027 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22028 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22029
22030 /* Comparisons. */
22031 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
22032 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
22033 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
22034 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
22035
22036 /* Double precision load/store are still present on single precision
22037 implementations. */
22038 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
22039 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
22040 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
22041 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
22042 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
22043 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
22044 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
22045 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
22046 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
22047 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
22048
22049 #undef ARM_VARIANT
22050 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
22051
22052 /* Moves and type conversions. */
22053 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
22054 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
22055 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
22056 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
22057 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
22058 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
22059 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
22060 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
22061 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
22062 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
22063 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
22064 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
22065 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
22066
22067 /* Monadic operations. */
22068 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
22069 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
22070 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
22071
22072 /* Dyadic operations. */
22073 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22074 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22075 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22076 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22077 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22078 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22079 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22080 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22081 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22082
22083 /* Comparisons. */
22084 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
22085 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
22086 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
22087 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
22088
22089 #undef ARM_VARIANT
22090 #define ARM_VARIANT & fpu_vfp_ext_v2
22091
22092 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
22093 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
22094 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
22095 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
22096
22097 /* Instructions which may belong to either the Neon or VFP instruction sets.
22098 Individual encoder functions perform additional architecture checks. */
22099 #undef ARM_VARIANT
22100 #define ARM_VARIANT & fpu_vfp_ext_v1xd
22101 #undef THUMB_VARIANT
22102 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
22103
22104 /* These mnemonics are unique to VFP. */
22105 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
22106 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
22107 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22108 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22109 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22110 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
22111 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
22112 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
22113 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
22114 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
22115
22116 /* Mnemonics shared by Neon and VFP. */
22117 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
22118 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
22119 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
22120
22121 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22122 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22123 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22124 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22125 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22126 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22127
22128 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
22129 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
22130 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
22131 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
22132
22133
22134 /* NOTE: All VMOV encoding is special-cased! */
22135 NCE(vmov, 0, 1, (VMOV), neon_mov),
22136 NCE(vmovq, 0, 1, (VMOV), neon_mov),
22137
22138 #undef THUMB_VARIANT
22139 /* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
22140 by different feature bits. Since we are setting the Thumb guard, we can
22141 require Thumb-1 which makes it a nop guard and set the right feature bit in
22142 do_vldr_vstr (). */
22143 #define THUMB_VARIANT & arm_ext_v4t
22144 NCE(vldr, d100b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
22145 NCE(vstr, d000b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
22146
22147 #undef ARM_VARIANT
22148 #define ARM_VARIANT & arm_ext_fp16
22149 #undef THUMB_VARIANT
22150 #define THUMB_VARIANT & arm_ext_fp16
22151 /* New instructions added from v8.2, allowing the extraction and insertion of
22152 the upper 16 bits of a 32-bit vector register. */
22153 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
22154 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
22155
22156 /* New backported fma/fms instructions optional in v8.2. */
22157 NCE (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
22158 NCE (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
22159
22160 #undef THUMB_VARIANT
22161 #define THUMB_VARIANT & fpu_neon_ext_v1
22162 #undef ARM_VARIANT
22163 #define ARM_VARIANT & fpu_neon_ext_v1
22164
22165 /* Data processing with three registers of the same length. */
22166 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
22167 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
22168 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
22169 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
22170 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
22171 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
22172 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
22173 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
22174 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
22175 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
22176 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
22177 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
22178 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
22179 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
22180 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
22181 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
22182 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
22183 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
22184 /* If not immediate, fall back to neon_dyadic_i64_su.
22185 shl_imm should accept I8 I16 I32 I64,
22186 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
22187 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
22188 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
22189 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
22190 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
22191 /* Logic ops, types optional & ignored. */
22192 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
22193 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
22194 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
22195 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
22196 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
22197 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
22198 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
22199 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
22200 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
22201 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
22202 /* Bitfield ops, untyped. */
22203 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
22204 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
22205 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
22206 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
22207 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
22208 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
22209 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
22210 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
22211 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
22212 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
22213 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
22214 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
22215 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
22216 back to neon_dyadic_if_su. */
22217 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
22218 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
22219 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
22220 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
22221 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
22222 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
22223 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
22224 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
22225 /* Comparison. Type I8 I16 I32 F32. */
22226 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
22227 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
22228 /* As above, D registers only. */
22229 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
22230 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
22231 /* Int and float variants, signedness unimportant. */
22232 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
22233 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
22234 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
22235 /* Add/sub take types I8 I16 I32 I64 F32. */
22236 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
22237 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
22238 /* vtst takes sizes 8, 16, 32. */
22239 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
22240 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
22241 /* VMUL takes I8 I16 I32 F32 P8. */
22242 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
22243 /* VQD{R}MULH takes S16 S32. */
22244 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
22245 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
22246 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
22247 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
22248 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
22249 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
22250 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
22251 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
22252 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
22253 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
22254 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
22255 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
22256 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
22257 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
22258 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
22259 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
22260 /* ARM v8.1 extension. */
22261 nUF (vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
22262 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
22263 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
22264 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
22265
22266 /* Two address, int/float. Types S8 S16 S32 F32. */
22267 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
22268 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
22269
22270 /* Data processing with two registers and a shift amount. */
22271 /* Right shifts, and variants with rounding.
22272 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
22273 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
22274 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
22275 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
22276 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
22277 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
22278 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
22279 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
22280 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
22281 /* Shift and insert. Sizes accepted 8 16 32 64. */
22282 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
22283 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
22284 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
22285 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
22286 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
22287 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
22288 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
22289 /* Right shift immediate, saturating & narrowing, with rounding variants.
22290 Types accepted S16 S32 S64 U16 U32 U64. */
22291 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
22292 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
22293 /* As above, unsigned. Types accepted S16 S32 S64. */
22294 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
22295 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
22296 /* Right shift narrowing. Types accepted I16 I32 I64. */
22297 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
22298 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
22299 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
22300 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
22301 /* CVT with optional immediate for fixed-point variant. */
22302 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
22303
22304 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
22305 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
22306
22307 /* Data processing, three registers of different lengths. */
22308 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
22309 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
22310 /* If not scalar, fall back to neon_dyadic_long.
22311 Vector types as above, scalar types S16 S32 U16 U32. */
22312 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
22313 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
22314 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
22315 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
22316 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
22317 /* Dyadic, narrowing insns. Types I16 I32 I64. */
22318 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
22319 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
22320 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
22321 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
22322 /* Saturating doubling multiplies. Types S16 S32. */
22323 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
22324 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
22325 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
22326 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
22327 S16 S32 U16 U32. */
22328 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
22329
22330 /* Extract. Size 8. */
22331 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
22332 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
22333
22334 /* Two registers, miscellaneous. */
22335 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
22336 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
22337 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
22338 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
22339 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
22340 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
22341 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
22342 /* Vector replicate. Sizes 8 16 32. */
22343 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
22344 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
22345 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
22346 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
22347 /* VMOVN. Types I16 I32 I64. */
22348 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
22349 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
22350 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
22351 /* VQMOVUN. Types S16 S32 S64. */
22352 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
22353 /* VZIP / VUZP. Sizes 8 16 32. */
22354 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
22355 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
22356 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
22357 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
22358 /* VQABS / VQNEG. Types S8 S16 S32. */
22359 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
22360 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
22361 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
22362 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
22363 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
22364 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
22365 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
22366 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
22367 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
22368 /* Reciprocal estimates. Types U32 F16 F32. */
22369 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
22370 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
22371 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
22372 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
22373 /* VCLS. Types S8 S16 S32. */
22374 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
22375 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
22376 /* VCLZ. Types I8 I16 I32. */
22377 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
22378 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
22379 /* VCNT. Size 8. */
22380 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
22381 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
22382 /* Two address, untyped. */
22383 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
22384 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
22385 /* VTRN. Sizes 8 16 32. */
22386 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
22387 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
22388
22389 /* Table lookup. Size 8. */
22390 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
22391 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
22392
22393 #undef THUMB_VARIANT
22394 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
22395 #undef ARM_VARIANT
22396 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
22397
22398 /* Neon element/structure load/store. */
22399 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
22400 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
22401 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
22402 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
22403 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
22404 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
22405 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
22406 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
22407
22408 #undef THUMB_VARIANT
22409 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
22410 #undef ARM_VARIANT
22411 #define ARM_VARIANT & fpu_vfp_ext_v3xd
22412 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
22413 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
22414 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
22415 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
22416 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
22417 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
22418 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
22419 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
22420 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
22421
22422 #undef THUMB_VARIANT
22423 #define THUMB_VARIANT & fpu_vfp_ext_v3
22424 #undef ARM_VARIANT
22425 #define ARM_VARIANT & fpu_vfp_ext_v3
22426
22427 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
22428 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
22429 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
22430 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
22431 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
22432 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
22433 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
22434 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
22435 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
22436
22437 #undef ARM_VARIANT
22438 #define ARM_VARIANT & fpu_vfp_ext_fma
22439 #undef THUMB_VARIANT
22440 #define THUMB_VARIANT & fpu_vfp_ext_fma
22441 /* Mnemonics shared by Neon and VFP. These are included in the
22442 VFP FMA variant; NEON and VFP FMA always includes the NEON
22443 FMA instructions. */
22444 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
22445 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
22446 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
22447 the v form should always be used. */
22448 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22449 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22450 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22451 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22452 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22453 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22454
22455 #undef THUMB_VARIANT
22456 #undef ARM_VARIANT
22457 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
22458
22459 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22460 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22461 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22462 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22463 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22464 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22465 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
22466 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
22467
22468 #undef ARM_VARIANT
22469 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
22470
22471 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
22472 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
22473 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
22474 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
22475 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
22476 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
22477 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
22478 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
22479 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
22480 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
22481 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
22482 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
22483 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
22484 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
22485 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
22486 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
22487 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
22488 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
22489 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
22490 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
22491 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22492 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22493 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22494 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22495 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22496 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22497 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
22498 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
22499 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
22500 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
22501 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
22502 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
22503 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
22504 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
22505 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
22506 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
22507 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
22508 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22509 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22510 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22511 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22512 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22513 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22514 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22515 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22516 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22517 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
22518 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22519 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22520 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22521 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22522 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22523 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22524 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22525 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22526 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22527 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22528 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22529 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22530 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22531 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22532 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22533 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22534 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22535 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22536 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22537 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
22538 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
22539 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
22540 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
22541 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22542 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22543 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22544 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22545 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22546 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22547 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22548 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22549 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22550 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22551 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22552 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22553 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22554 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22555 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22556 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22557 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22558 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22559 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
22560 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22561 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22562 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22563 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22564 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22565 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22566 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22567 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22568 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22569 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22570 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22571 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22572 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22573 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22574 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22575 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22576 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22577 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22578 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22579 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22580 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22581 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
22582 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22583 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22584 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22585 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22586 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22587 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22588 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22589 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22590 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22591 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22592 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22593 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22594 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22595 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22596 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22597 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22598 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22599 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22600 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
22601 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
22602 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
22603 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
22604 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22605 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22606 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22607 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22608 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22609 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22610 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22611 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22612 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22613 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
22614 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
22615 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
22616 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
22617 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
22618 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
22619 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22620 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22621 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22622 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
22623 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
22624 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
22625 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
22626 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
22627 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
22628 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22629 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22630 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22631 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22632 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
22633
22634 #undef ARM_VARIANT
22635 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
22636
22637 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
22638 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
22639 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
22640 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
22641 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
22642 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
22643 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22644 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22645 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22646 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22647 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22648 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22649 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22650 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22651 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22652 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22653 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22654 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22655 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22656 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22657 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
22658 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22659 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22660 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22661 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22662 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22663 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22664 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22665 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22666 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22667 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22668 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22669 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22670 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22671 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22672 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22673 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22674 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22675 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22676 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22677 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22678 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22679 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22680 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22681 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22682 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22683 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22684 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22685 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22686 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22687 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22688 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22689 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22690 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22691 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22692 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22693 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22694
22695 #undef ARM_VARIANT
22696 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
22697
22698 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
22699 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
22700 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
22701 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
22702 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
22703 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
22704 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
22705 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
22706 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
22707 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
22708 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
22709 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
22710 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
22711 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
22712 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
22713 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
22714 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
22715 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
22716 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
22717 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
22718 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
22719 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
22720 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
22721 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
22722 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
22723 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
22724 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
22725 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
22726 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
22727 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
22728 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
22729 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
22730 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
22731 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
22732 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
22733 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
22734 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
22735 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
22736 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
22737 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
22738 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
22739 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
22740 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
22741 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
22742 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
22743 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
22744 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
22745 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
22746 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
22747 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
22748 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
22749 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
22750 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
22751 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
22752 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
22753 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
22754 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
22755 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
22756 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
22757 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
22758 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
22759 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
22760 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
22761 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
22762 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22763 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
22764 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22765 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
22766 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22767 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
22768 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22769 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22770 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
22771 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
22772 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
22773 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
22774
22775 /* ARMv8.5-A instructions. */
22776 #undef ARM_VARIANT
22777 #define ARM_VARIANT & arm_ext_sb
22778 #undef THUMB_VARIANT
22779 #define THUMB_VARIANT & arm_ext_sb
22780 TUF("sb", 57ff070, f3bf8f70, 0, (), noargs, noargs),
22781
22782 #undef ARM_VARIANT
22783 #define ARM_VARIANT & arm_ext_predres
22784 #undef THUMB_VARIANT
22785 #define THUMB_VARIANT & arm_ext_predres
22786 CE("cfprctx", e070f93, 1, (RRnpc), rd),
22787 CE("dvprctx", e070fb3, 1, (RRnpc), rd),
22788 CE("cpprctx", e070ff3, 1, (RRnpc), rd),
22789
22790 /* ARMv8-M instructions. */
22791 #undef ARM_VARIANT
22792 #define ARM_VARIANT NULL
22793 #undef THUMB_VARIANT
22794 #define THUMB_VARIANT & arm_ext_v8m
22795 ToU("sg", e97fe97f, 0, (), noargs),
22796 ToC("blxns", 4784, 1, (RRnpc), t_blx),
22797 ToC("bxns", 4704, 1, (RRnpc), t_bx),
22798 ToC("tt", e840f000, 2, (RRnpc, RRnpc), tt),
22799 ToC("ttt", e840f040, 2, (RRnpc, RRnpc), tt),
22800 ToC("tta", e840f080, 2, (RRnpc, RRnpc), tt),
22801 ToC("ttat", e840f0c0, 2, (RRnpc, RRnpc), tt),
22802
22803 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
22804 instructions behave as nop if no VFP is present. */
22805 #undef THUMB_VARIANT
22806 #define THUMB_VARIANT & arm_ext_v8m_main
22807 ToC("vlldm", ec300a00, 1, (RRnpc), rn),
22808 ToC("vlstm", ec200a00, 1, (RRnpc), rn),
22809
22810 /* Armv8.1-M Mainline instructions. */
22811 #undef THUMB_VARIANT
22812 #define THUMB_VARIANT & arm_ext_v8_1m_main
22813 toC("bf", _bf, 2, (EXPs, EXPs), t_branch_future),
22814 toU("bfcsel", _bfcsel, 4, (EXPs, EXPs, EXPs, COND), t_branch_future),
22815 toC("bfx", _bfx, 2, (EXPs, RRnpcsp), t_branch_future),
22816 toC("bfl", _bfl, 2, (EXPs, EXPs), t_branch_future),
22817 toC("bflx", _bflx, 2, (EXPs, RRnpcsp), t_branch_future),
22818
22819 toU("dls", _dls, 2, (LR, RRnpcsp), t_loloop),
22820 toU("wls", _wls, 3, (LR, RRnpcsp, EXP), t_loloop),
22821 toU("le", _le, 2, (oLR, EXP), t_loloop),
22822
22823 ToC("clrm", e89f0000, 1, (CLRMLST), t_clrm),
22824 ToC("vscclrm", ec9f0a00, 1, (VRSDVLST), t_vscclrm),
22825
22826 #undef THUMB_VARIANT
22827 #define THUMB_VARIANT & mve_ext
22828 ToC("vpst", fe710f4d, 0, (), mve_vpt),
22829 ToC("vpstt", fe318f4d, 0, (), mve_vpt),
22830 ToC("vpste", fe718f4d, 0, (), mve_vpt),
22831 ToC("vpsttt", fe314f4d, 0, (), mve_vpt),
22832 ToC("vpstte", fe31cf4d, 0, (), mve_vpt),
22833 ToC("vpstet", fe71cf4d, 0, (), mve_vpt),
22834 ToC("vpstee", fe714f4d, 0, (), mve_vpt),
22835 ToC("vpstttt", fe312f4d, 0, (), mve_vpt),
22836 ToC("vpsttte", fe316f4d, 0, (), mve_vpt),
22837 ToC("vpsttet", fe31ef4d, 0, (), mve_vpt),
22838 ToC("vpsttee", fe31af4d, 0, (), mve_vpt),
22839 ToC("vpstett", fe71af4d, 0, (), mve_vpt),
22840 ToC("vpstete", fe71ef4d, 0, (), mve_vpt),
22841 ToC("vpsteet", fe716f4d, 0, (), mve_vpt),
22842 ToC("vpsteee", fe712f4d, 0, (), mve_vpt),
22843
22844 /* MVE and MVE FP only. */
22845 mCEF(vmullb, _vmullb, 3, (RMQ, RMQ, RMQ), mve_vmull),
22846 mCEF(vabav, _vabav, 3, (RRnpcsp, RMQ, RMQ), mve_vabav),
22847 mCEF(vmladav, _vmladav, 3, (RRe, RMQ, RMQ), mve_vmladav),
22848 mCEF(vmladava, _vmladava, 3, (RRe, RMQ, RMQ), mve_vmladav),
22849 mCEF(vmladavx, _vmladavx, 3, (RRe, RMQ, RMQ), mve_vmladav),
22850 mCEF(vmladavax, _vmladavax, 3, (RRe, RMQ, RMQ), mve_vmladav),
22851 mCEF(vmlav, _vmladav, 3, (RRe, RMQ, RMQ), mve_vmladav),
22852 mCEF(vmlava, _vmladava, 3, (RRe, RMQ, RMQ), mve_vmladav),
22853 mCEF(vmlsdav, _vmlsdav, 3, (RRe, RMQ, RMQ), mve_vmladav),
22854 mCEF(vmlsdava, _vmlsdava, 3, (RRe, RMQ, RMQ), mve_vmladav),
22855 mCEF(vmlsdavx, _vmlsdavx, 3, (RRe, RMQ, RMQ), mve_vmladav),
22856 mCEF(vmlsdavax, _vmlsdavax, 3, (RRe, RMQ, RMQ), mve_vmladav),
22857
22858 #undef ARM_VARIANT
22859 #define ARM_VARIANT & fpu_vfp_ext_v1xd
22860 #undef THUMB_VARIANT
22861 #define THUMB_VARIANT & arm_ext_v6t2
22862
22863 mCEF(vmullt, _vmullt, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ), mve_vmull),
22864 mnCEF(vadd, _vadd, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_addsub_if_i),
22865 mnCEF(vsub, _vsub, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_addsub_if_i),
22866
22867 MNCEF(vabs, 1b10300, 2, (RNSDQMQ, RNSDQMQ), neon_abs_neg),
22868 MNCEF(vneg, 1b10380, 2, (RNSDQMQ, RNSDQMQ), neon_abs_neg),
22869
22870 #undef ARM_VARIANT
22871 #define ARM_VARIANT & fpu_neon_ext_v1
22872 mnUF(vabd, _vabd, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
22873 mnUF(vabdl, _vabdl, 3, (RNQMQ, RNDMQ, RNDMQ), neon_dyadic_long),
22874 mnUF(vaddl, _vaddl, 3, (RNQMQ, RNDMQ, RNDMQR), neon_dyadic_long),
22875 mnUF(vsubl, _vsubl, 3, (RNQMQ, RNDMQ, RNDMQR), neon_dyadic_long),
22876 };
22877 #undef ARM_VARIANT
22878 #undef THUMB_VARIANT
22879 #undef TCE
22880 #undef TUE
22881 #undef TUF
22882 #undef TCC
22883 #undef cCE
22884 #undef cCL
22885 #undef C3E
22886 #undef C3
22887 #undef CE
22888 #undef CM
22889 #undef CL
22890 #undef UE
22891 #undef UF
22892 #undef UT
22893 #undef NUF
22894 #undef nUF
22895 #undef NCE
22896 #undef nCE
22897 #undef OPS0
22898 #undef OPS1
22899 #undef OPS2
22900 #undef OPS3
22901 #undef OPS4
22902 #undef OPS5
22903 #undef OPS6
22904 #undef do_0
22905 #undef ToC
22906 #undef toC
22907 #undef ToU
22908 #undef toU
22909 \f
22910 /* MD interface: bits in the object file. */
22911
22912 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
22913 for use in the a.out file, and stores them in the array pointed to by buf.
22914 This knows about the endian-ness of the target machine and does
22915 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
22916 2 (short) and 4 (long) Floating numbers are put out as a series of
22917 LITTLENUMS (shorts, here at least). */
22918
22919 void
22920 md_number_to_chars (char * buf, valueT val, int n)
22921 {
22922 if (target_big_endian)
22923 number_to_chars_bigendian (buf, val, n);
22924 else
22925 number_to_chars_littleendian (buf, val, n);
22926 }
22927
22928 static valueT
22929 md_chars_to_number (char * buf, int n)
22930 {
22931 valueT result = 0;
22932 unsigned char * where = (unsigned char *) buf;
22933
22934 if (target_big_endian)
22935 {
22936 while (n--)
22937 {
22938 result <<= 8;
22939 result |= (*where++ & 255);
22940 }
22941 }
22942 else
22943 {
22944 while (n--)
22945 {
22946 result <<= 8;
22947 result |= (where[n] & 255);
22948 }
22949 }
22950
22951 return result;
22952 }
22953
22954 /* MD interface: Sections. */
22955
22956 /* Calculate the maximum variable size (i.e., excluding fr_fix)
22957 that an rs_machine_dependent frag may reach. */
22958
22959 unsigned int
22960 arm_frag_max_var (fragS *fragp)
22961 {
22962 /* We only use rs_machine_dependent for variable-size Thumb instructions,
22963 which are either THUMB_SIZE (2) or INSN_SIZE (4).
22964
22965 Note that we generate relaxable instructions even for cases that don't
22966 really need it, like an immediate that's a trivial constant. So we're
22967 overestimating the instruction size for some of those cases. Rather
22968 than putting more intelligence here, it would probably be better to
22969 avoid generating a relaxation frag in the first place when it can be
22970 determined up front that a short instruction will suffice. */
22971
22972 gas_assert (fragp->fr_type == rs_machine_dependent);
22973 return INSN_SIZE;
22974 }
22975
22976 /* Estimate the size of a frag before relaxing. Assume everything fits in
22977 2 bytes. */
22978
22979 int
22980 md_estimate_size_before_relax (fragS * fragp,
22981 segT segtype ATTRIBUTE_UNUSED)
22982 {
22983 fragp->fr_var = 2;
22984 return 2;
22985 }
22986
/* Convert a machine dependent frag: rewrite the (relaxed) Thumb
   instruction at the frag's variable part into its final 16- or 32-bit
   encoding, based on the size chosen during relaxation (fragp->fr_var),
   and attach the appropriate fixup/relocation for the operand.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* BUF points at the instruction occupying the frag's variable part.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* Fetch the original 16-bit encoding so its register fields can be
     transplanted into the 32-bit encoding if we relaxed.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* 16-bit forms with 4 or 9 in the top nibble keep the
	     transfer register in bits 8-10; the other forms keep it in
	     bits 0-2 with the base register in bits 3-5.  Move those
	     fields into the 32-bit encoding's register positions
	     (bits 12-14 and, for the base, bits 16-18).  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  /* Fixed bits required by the 32-bit immediate-offset form.  */
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the literal-pool form of LDR is PC-relative here.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Move the destination register (bits 4-7 of the 16-bit form)
	     into bits 8-11 of the 32-bit form.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Compensate for the PC bias of the short encoding.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* MOV/MOVS carry the register in the destination field
	     (no shift needed beyond r0off 0); CMP/CMN carry it in the
	     first-operand field, 8 bits higher.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Move the condition field (bits 8-11 of the 16-bit form)
	     into bits 22-25 of the 32-bit form.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw. */
	  insn = THUMB_OP32 (opcode);
	  /* Move the destination register into bits 8-11.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Transplant destination (bits 4-7) and source (bits 0-3)
	     registers from the 16-bit form.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 (the S bit in the rewritten encoding) selects which
	     immediate relocation is applicable.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  /* Attach the fixup for the operand, preserving the source location
     for diagnostics, and account for the instruction's final size.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
23160
23161 /* Return the size of a relaxable immediate operand instruction.
23162 SHIFT and SIZE specify the form of the allowable immediate. */
23163 static int
23164 relax_immediate (fragS *fragp, int size, int shift)
23165 {
23166 offsetT offset;
23167 offsetT mask;
23168 offsetT low;
23169
23170 /* ??? Should be able to do better than this. */
23171 if (fragp->fr_symbol)
23172 return 4;
23173
23174 low = (1 << shift) - 1;
23175 mask = (1 << (shift + size)) - (1 << shift);
23176 offset = fragp->fr_offset;
23177 /* Force misaligned offsets to 32-bit variant. */
23178 if (offset & low)
23179 return 4;
23180 if (offset & ~mask)
23181 return 4;
23182 return 2;
23183 }
23184
/* Get the address of a symbol during relaxation.  FRAGP is the frag
   containing the relaxable instruction that references the symbol;
   STRETCH is the net number of bytes by which preceding frags have
   grown (negative if they shrank) so far in this relaxation pass.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the stretch towards zero to the alignment
		 boundary: bytes absorbed by alignment padding do not
		 move anything past the alignment frag.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* If nothing survives the alignment, the symbol cannot
		 have moved at all.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* Only apply the (possibly reduced) stretch if the symbol's
	 frag actually lies after FRAGP.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
23234
/* Return the size of a relaxable adr pseudo-instruction or PC-relative
   load: 4 (wide encoding) unless the target is known to be reachable
   by the 2-byte form.  */
static int
relax_adr (fragS *fragp, asection *sec, long stretch)
{
  addressT addr;
  offsetT val;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (fragp->fr_symbol == NULL
      || !S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix;
  /* The base is the instruction's PC value (address + 4) with the low
     two bits forced to zero.  */
  addr = (addr + 4) & ~3;
  /* Force misaligned targets to 32-bit variant.  */
  if (val & 3)
    return 4;
  val -= addr;
  /* The narrow form only encodes a positive word offset of up to
     1020 bytes.  */
  if (val < 0 || val > 1020)
    return 4;
  return 2;
}
23261
23262 /* Return the size of a relaxable add/sub immediate instruction. */
23263 static int
23264 relax_addsub (fragS *fragp, asection *sec)
23265 {
23266 char *buf;
23267 int op;
23268
23269 buf = fragp->fr_literal + fragp->fr_fix;
23270 op = bfd_get_16(sec->owner, buf);
23271 if ((op & 0xf) == ((op >> 4) & 0xf))
23272 return relax_immediate (fragp, 8, 0);
23273 else
23274 return relax_immediate (fragp, 3, 0);
23275 }
23276
/* Return TRUE iff the definition of symbol S could be pre-empted
   (overridden) at link or load time.  Used by branch relaxation:
   a pre-emptible target must keep the wide branch encoding.  */
static bfd_boolean
symbol_preemptible (symbolS *s)
{
  /* Weak symbols can always be pre-empted.  */
  if (S_IS_WEAK (s))
    return TRUE;

  /* Non-global symbols cannot be pre-empted.  */
  if (! S_IS_EXTERNAL (s))
    return FALSE;

#ifdef OBJ_ELF
  /* In ELF, a global symbol can be marked protected, or private.  In that
     case it can't be pre-empted (other definitions in the same link unit
     would violate the ODR).  */
  if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
    return FALSE;
#endif

  /* Other global symbols might be pre-empted.  */
  return TRUE;
}
23301
/* Return the size of a relaxable branch instruction.  BITS is the
   size of the offset field in the narrow instruction.  */

static int
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
{
  addressT addr;
  offsetT val;
  offsetT limit;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

#ifdef OBJ_ELF
  /* A branch to a function in ARM state will require interworking.  */
  if (S_IS_DEFINED (fragp->fr_symbol)
      && ARM_IS_FUNC (fragp->fr_symbol))
    return 4;
#endif

  /* A target that may be replaced at link/load time must keep the
     wide encoding so the linker can redirect the branch.  */
  if (symbol_preemptible (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  /* Thumb branches are relative to the instruction address + 4.  */
  addr = fragp->fr_address + fragp->fr_fix + 4;
  val -= addr;

  /* Offset is a signed value *2 */
  limit = 1 << bits;
  if (val >= limit || val < -limit)
    return 4;
  return 2;
}
23338
23339
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* Dispatch on the mnemonic stored in fr_subtype.  For the
     relax_immediate cases, the two arguments are the width of the
     immediate field and its scale (shift) in the narrow encoding.  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
23418
23419 /* Round up a section size to the appropriate boundary. */
23420
23421 valueT
23422 md_section_align (segT segment ATTRIBUTE_UNUSED,
23423 valueT size)
23424 {
23425 return size;
23426 }
23427
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment with NOP instructions (plus zero bytes
   for any sub-instruction-size residue), and record an ELF mapping
   symbol for the padding where appropriate.  */

void
arm_handle_align (fragS * fragP)
{
  /* NOP encodings indexed by [architecture variant][endianness].  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this alignment frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* NOTE(review): this assumes MAX_MEM_FOR_RS_ALIGN_CODE is of the
     form 2^n - 1, so masking reduces BYTES below the limit.  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  /* arm_init_frag must have recorded the ARM/Thumb mode by now.  */
  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb padding.  On Thumb-2 capable targets, a mix of one
	 optional narrow NOP plus wide NOPs is used so that any
	 multiple of 2 bytes can be filled.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      /* ARM padding: use the architected NOP on v6k and later.  */
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Pad any residue smaller than one NOP with zero bytes, marked as
     data via a mapping symbol on ELF.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
23547
23548 /* Called from md_do_align. Used to create an alignment
23549 frag in a code section. */
23550
23551 void
23552 arm_frag_align_code (int n, int max)
23553 {
23554 char * p;
23555
23556 /* We assume that there will never be a requirement
23557 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
23558 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
23559 {
23560 char err_msg[128];
23561
23562 sprintf (err_msg,
23563 _("alignments greater than %d bytes not supported in .text sections."),
23564 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
23565 as_fatal ("%s", err_msg);
23566 }
23567
23568 p = frag_var (rs_align_code,
23569 MAX_MEM_FOR_RS_ALIGN_CODE,
23570 1,
23571 (relax_substateT) max,
23572 (symbolS *) NULL,
23573 (offsetT) n,
23574 (char *) NULL);
23575 *p = 0;
23576 }
23577
23578 /* Perform target specific initialisation of a frag.
23579 Note - despite the name this initialisation is not done when the frag
23580 is created, but only when its type is assigned. A frag can be created
23581 and used a long time before its type is set, so beware of assuming that
23582 this initialisation is performed first. */
23583
23584 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  The
     MODE_RECORDED bit marks the field as having been set.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
23591
23592 #else /* OBJ_ELF is defined. */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  bfd_boolean frag_thumb_mode;

  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
    return;

  /* Strip the MODE_RECORDED marker bit, leaving just the mode.  */
  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;

  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  switch (fragP->fr_type)
    {
    case rs_align:
    case rs_align_test:
    case rs_fill:
      /* Alignment padding counts as data.  */
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align_code:
      /* Code alignment is filled with NOPs in the recorded mode.  */
      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
      break;
    default:
      break;
    }
}
23626
23627 /* When we change sections we need to issue a new mapping symbol. */
23628
23629 void
23630 arm_elf_change_section (void)
23631 {
23632 /* Link an unlinked unwind index table section to the .text section. */
23633 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
23634 && elf_linked_to_section (now_seg) == NULL)
23635 elf_linked_to_section (now_seg) = text_section;
23636 }
23637
23638 int
23639 arm_elf_section_type (const char * str, size_t len)
23640 {
23641 if (len == 5 && strncmp (str, "exidx", 5) == 0)
23642 return SHT_ARM_EXIDX;
23643
23644 return -1;
23645 }
23646 \f
23647 /* Code to deal with unwinding tables. */
23648
23649 static void add_unwind_adjustsp (offsetT);
23650
23651 /* Generate any deferred unwind frame offset. */
23652
23653 static void
23654 flush_pending_unwind (void)
23655 {
23656 offsetT offset;
23657
23658 offset = unwind.pending_offset;
23659 unwind.pending_offset = 0;
23660 if (offset != 0)
23661 add_unwind_adjustsp (offset);
23662 }
23663
/* Add an opcode to this list for this function.  Two-byte opcodes should
   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
   order.  */

static void
add_unwind_opcode (valueT op, int length)
{
  /* Add any deferred stack adjustment.  */
  if (unwind.pending_offset)
    flush_pending_unwind ();

  unwind.sp_restored = 0;

  /* Grow the opcode buffer by ARM_OPCODE_CHUNK_SIZE when needed.  */
  if (unwind.opcode_count + length > unwind.opcode_alloc)
    {
      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
      if (unwind.opcodes)
	unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
				     unwind.opcode_alloc);
      else
	unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
    }
  /* Append the opcode bytes least-significant first; the list is
     reversed again when the table entry is emitted.  */
  while (length > 0)
    {
      length--;
      unwind.opcodes[unwind.opcode_count] = op & 0xff;
      op >>= 8;
      unwind.opcode_count++;
    }
}
23694
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes.
   Positive offsets are encoded in units of 4 with a bias of 4;
   negative offsets are emitted as a sequence of 0x40-based opcodes.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  The uleb128 bytes are added last-first because
	 the opcode list is built in reverse.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  A maximal first adjustment (0x3f) followed
	 by the remainder.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Emit 0x100-byte decrements (opcode 0x7f) until the remainder
	 fits a single 0x40-based opcode.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
23756
/* Finish the list of unwind opcodes for this function.  */

static void
finish_unwind_opcodes (void)
{
  valueT op;

  if (unwind.fp_used)
    {
      /* Adjust sp as necessary.  */
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
      flush_pending_unwind ();

      /* After restoring sp from the frame pointer.  Opcode 0x90 | reg
	 (presumably the EHABI "set vsp = reg" opcode — confirm against
	 the EHABI spec).  */
      op = 0x90 | unwind.fp_reg;
      add_unwind_opcode (op, 1);
    }
  else
    flush_pending_unwind ();
}
23777
23778
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry.  Switches the current section to the unwind section that
   corresponds to TEXT_SEG, creating it if necessary.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  /* Pick the section-name prefix and ELF section type: index tables
     get the SHT_ARM_EXIDX type, unwind data is plain PROGBITS.  */
  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* The unwind section name is the prefix plus the text section's
     name (with ".text" treated as an empty suffix).  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, 0, flags, 0, group_name,
			  linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
23846
23847
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table: up to three
		 opcodes are packed (MSB first) after the 0x80 marker
		 byte, and the result is returned directly instead of
		 being written to the unwind section.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round up to whole words; the entry cannot exceed 255 extra words.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* The table entry must be word-aligned.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
24016
24017
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_arm_frame_initial_instructions (void)
{
  /* At function entry the CFA is SP with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
24025 #endif /* OBJ_ELF */
24026
24027 /* Convert REGNAME to a DWARF-2 register number. */
24028
24029 int
24030 tc_arm_regname_to_dw2regnum (char *regname)
24031 {
24032 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
24033 if (reg != FAIL)
24034 return reg;
24035
24036 /* PR 16694: Allow VFP registers as well. */
24037 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
24038 if (reg != FAIL)
24039 return 64 + reg;
24040
24041 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
24042 if (reg != FAIL)
24043 return reg + 256;
24044
24045 return FAIL;
24046 }
24047
24048 #ifdef TE_PE
24049 void
24050 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
24051 {
24052 expressionS exp;
24053
24054 exp.X_op = O_secrel;
24055 exp.X_add_symbol = symbol;
24056 exp.X_add_number = 0;
24057 emit_expr (&exp, size);
24058 }
24059 #endif
24060
/* MD interface: Symbol and relocation handling.  */

/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* For a branch to a Thumb function in the same section we can
	 resolve locally (restore the real base) when the target CPU
	 supports interworking (v5t).  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
24196
24197 static bfd_boolean flag_warn_syms = TRUE;
24198
bfd_boolean
arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
{
  /* PR 18347 - Warn if the user attempts to create a symbol with the same
     name as an ARM instruction.  Whilst strictly speaking it is allowed, it
     does mean that the resulting code might be very confusing to the reader.
     Also this warning can be triggered if the user omits an operand before
     an immediate address, eg:

       LDR =foo

     GAS treats this as an assignment of the value of the symbol foo to a
     symbol LDR, and so (without this code) it will not issue any kind of
     warning or error message.

     Note - ARM instructions are case-insensitive but the strings in the hash
     table are all stored in lower case, so we must first ensure that name is
     lower case too.  */
  if (flag_warn_syms && arm_ops_hsh)
    {
      char * nbuf = strdup (name);
      char * p;

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);
      if (hash_find (arm_ops_hsh, nbuf) != NULL)
	{
	  static struct hash_control * already_warned = NULL;

	  if (already_warned == NULL)
	    already_warned = hash_new ();
	  /* Only warn about the symbol once.  To keep the code
	     simple we let hash_insert do the lookup for us.  */
	  /* Note: NBUF is deliberately not freed on this path - on
	     successful insertion the hash table keeps the pointer as
	     its key.  */
	  if (hash_insert (already_warned, nbuf, NULL) == NULL)
	    as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
	}
      else
	free (nbuf);
    }

  /* Always returning FALSE means the '=' is still treated as a plain
     assignment by the caller.  */
  return FALSE;
}
24241
24242 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
24243 Otherwise we have no need to default values of symbols. */
24244
24245 symbolS *
24246 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
24247 {
24248 #ifdef OBJ_ELF
24249 if (name[0] == '_' && name[1] == 'G'
24250 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
24251 {
24252 if (!GOT_symbol)
24253 {
24254 if (symbol_find (name))
24255 as_bad (_("GOT already in the symbol table"));
24256
24257 GOT_symbol = symbol_new (name, undefined_section,
24258 (valueT) 0, & zero_address_frag);
24259 }
24260
24261 return GOT_symbol;
24262 }
24263 #endif
24264
24265 return NULL;
24266 }
24267
24268 /* Subroutine of md_apply_fix. Check to see if an immediate can be
24269 computed as two separate immediate values, added together. We
24270 already know that this value cannot be computed by just one ARM
24271 instruction. */
24272
24273 static unsigned int
24274 validate_immediate_twopart (unsigned int val,
24275 unsigned int * highpart)
24276 {
24277 unsigned int a;
24278 unsigned int i;
24279
24280 for (i = 0; i < 32; i += 2)
24281 if (((a = rotate_left (val, i)) & 0xff) != 0)
24282 {
24283 if (a & 0xff00)
24284 {
24285 if (a & ~ 0xffff)
24286 continue;
24287 * highpart = (a >> 8) | ((i + 24) << 7);
24288 }
24289 else if (a & 0xff0000)
24290 {
24291 if (a & 0xff000000)
24292 continue;
24293 * highpart = (a >> 16) | ((i + 16) << 7);
24294 }
24295 else
24296 {
24297 gas_assert (a & 0xff000000);
24298 * highpart = (a >> 24) | ((i + 8) << 7);
24299 }
24300
24301 return (a & 0xff) | (i << 7);
24302 }
24303
24304 return FAIL;
24305 }
24306
24307 static int
24308 validate_offset_imm (unsigned int val, int hwse)
24309 {
24310 if ((hwse && val > 255) || val > 4095)
24311 return FAIL;
24312 return val;
24313 }
24314
/* Subroutine of md_apply_fix.  Do those data_ops which can take a
   negative immediate constant by altering the instruction.  A bit of
   a hack really.
	MOV <-> MVN
	AND <-> BIC
	ADC <-> SBC
   by inverting the second operand, and
	ADD <-> SUB
	CMP <-> CMN
   by negating the second operand.
   On success, patches the opcode field of *INSTRUCTION in place and
   returns the new encoded immediate; returns FAIL if neither the
   negated nor the inverted constant is representable, or if the
   opcode has no counterpart.  */

static int
negate_data_op (unsigned long * instruction,
		unsigned long value)
{
  int op, new_inst;
  unsigned long negated, inverted;

  /* Pre-compute both candidate replacement immediates; either may be
     FAIL if it cannot be encoded as an ARM modified immediate.  */
  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  /* Extract the data-processing opcode field.  */
  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* First negates.  */
    case OPCODE_SUB:		 /* ADD <-> SUB	 */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:		 /* CMP <-> CMN	 */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

      /* Now Inverted ops.  */
    case OPCODE_MOV:		 /* MOV <-> MVN	 */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:		 /* AND <-> BIC	 */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:		 /* ADC <-> SBC	 */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.	 */
    default:
      return FAIL;
    }

  /* The chosen replacement immediate was itself unencodable.  */
  if (value == (unsigned) FAIL)
    return FAIL;

  /* Swap in the replacement opcode; the caller merges VALUE (the new
     immediate field) back into the instruction word.  */
  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}
24403
/* Like negate_data_op, but for Thumb-2.  On success, patches the
   opcode field of *INSTRUCTION in place and returns the new encoded
   immediate; returns FAIL if no representable counterpart exists.  */

static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  /* Candidate replacement immediates; either may be FAIL if it cannot
     be encoded as a Thumb-2 modified immediate.  */
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  /* Destination register field, used to detect the TST special case.  */
  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* Rd == 15 means this is really TST, which cannot be converted,
	 so force the failure path below.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.	 */
    default:
      return FAIL;
    }

  /* The chosen replacement immediate was itself unencodable.  */
  if (value == (unsigned int)FAIL)
    return FAIL;

  /* Patch the opcode field in place; the caller merges VALUE back
     into the immediate fields.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
24479
24480 /* Read a 32-bit thumb instruction from buf. */
24481
24482 static unsigned long
24483 get_thumb32_insn (char * buf)
24484 {
24485 unsigned long insn;
24486 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
24487 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
24488
24489 return insn;
24490 }
24491
24492 /* We usually want to set the low bit on the address of thumb function
24493 symbols. In particular .word foo - . should have the low bit set.
24494 Generic code tries to fold the difference of two symbols to
24495 a constant. Prevent this and force a relocation when the first symbols
24496 is a thumb function. */
24497
24498 bfd_boolean
24499 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
24500 {
24501 if (op == O_subtract
24502 && l->X_op == O_symbol
24503 && r->X_op == O_symbol
24504 && THUMB_IS_FUNC (l->X_add_symbol))
24505 {
24506 l->X_op = O_subtract;
24507 l->X_op_symbol = r->X_add_symbol;
24508 l->X_add_number -= r->X_add_number;
24509 return TRUE;
24510 }
24511
24512 /* Process as normal. */
24513 return FALSE;
24514 }
24515
/* Encode Thumb2 unconditional branches and calls.  The encoding
   for the 2 are identical for the immediate values.  VALUE is the
   byte offset; the split fields are merged into the two instruction
   halfwords at BUF.  */

static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
  /* Bit positions of J1 (bit 13) and J2 (bit 11) in the second halfword.  */
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Split the offset into sign bit S, intermediate bits I1/I2, and the
     high and low immediate chunks (bit 0 is discarded).  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* J1 = NOT (I1 EOR S) and J2 = NOT (I2 EOR S): the trailing XOR with
     T2I1I2MASK performs the inversion of bits 13 and 11.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
24540
24541 void
24542 md_apply_fix (fixS * fixP,
24543 valueT * valP,
24544 segT seg)
24545 {
24546 offsetT value = * valP;
24547 offsetT newval;
24548 unsigned int newimm;
24549 unsigned long temp;
24550 int sign;
24551 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
24552
24553 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
24554
24555 /* Note whether this will delete the relocation. */
24556
24557 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
24558 fixP->fx_done = 1;
24559
24560 /* On a 64-bit host, silently truncate 'value' to 32 bits for
24561 consistency with the behaviour on 32-bit hosts. Remember value
24562 for emit_reloc. */
24563 value &= 0xffffffff;
24564 value ^= 0x80000000;
24565 value -= 0x80000000;
24566
24567 *valP = value;
24568 fixP->fx_addnumber = value;
24569
24570 /* Same treatment for fixP->fx_offset. */
24571 fixP->fx_offset &= 0xffffffff;
24572 fixP->fx_offset ^= 0x80000000;
24573 fixP->fx_offset -= 0x80000000;
24574
24575 switch (fixP->fx_r_type)
24576 {
24577 case BFD_RELOC_NONE:
24578 /* This will need to go in the object file. */
24579 fixP->fx_done = 0;
24580 break;
24581
24582 case BFD_RELOC_ARM_IMMEDIATE:
24583 /* We claim that this fixup has been processed here,
24584 even if in fact we generate an error because we do
24585 not have a reloc for it, so tc_gen_reloc will reject it. */
24586 fixP->fx_done = 1;
24587
24588 if (fixP->fx_addsy)
24589 {
24590 const char *msg = 0;
24591
24592 if (! S_IS_DEFINED (fixP->fx_addsy))
24593 msg = _("undefined symbol %s used as an immediate value");
24594 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
24595 msg = _("symbol %s is in a different section");
24596 else if (S_IS_WEAK (fixP->fx_addsy))
24597 msg = _("symbol %s is weak and may be overridden later");
24598
24599 if (msg)
24600 {
24601 as_bad_where (fixP->fx_file, fixP->fx_line,
24602 msg, S_GET_NAME (fixP->fx_addsy));
24603 break;
24604 }
24605 }
24606
24607 temp = md_chars_to_number (buf, INSN_SIZE);
24608
24609 /* If the offset is negative, we should use encoding A2 for ADR. */
24610 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
24611 newimm = negate_data_op (&temp, value);
24612 else
24613 {
24614 newimm = encode_arm_immediate (value);
24615
24616 /* If the instruction will fail, see if we can fix things up by
24617 changing the opcode. */
24618 if (newimm == (unsigned int) FAIL)
24619 newimm = negate_data_op (&temp, value);
24620 /* MOV accepts both ARM modified immediate (A1 encoding) and
24621 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
24622 When disassembling, MOV is preferred when there is no encoding
24623 overlap. */
24624 if (newimm == (unsigned int) FAIL
24625 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
24626 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
24627 && !((temp >> SBIT_SHIFT) & 0x1)
24628 && value >= 0 && value <= 0xffff)
24629 {
24630 /* Clear bits[23:20] to change encoding from A1 to A2. */
24631 temp &= 0xff0fffff;
24632 /* Encoding high 4bits imm. Code below will encode the remaining
24633 low 12bits. */
24634 temp |= (value & 0x0000f000) << 4;
24635 newimm = value & 0x00000fff;
24636 }
24637 }
24638
24639 if (newimm == (unsigned int) FAIL)
24640 {
24641 as_bad_where (fixP->fx_file, fixP->fx_line,
24642 _("invalid constant (%lx) after fixup"),
24643 (unsigned long) value);
24644 break;
24645 }
24646
24647 newimm |= (temp & 0xfffff000);
24648 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
24649 break;
24650
24651 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
24652 {
24653 unsigned int highpart = 0;
24654 unsigned int newinsn = 0xe1a00000; /* nop. */
24655
24656 if (fixP->fx_addsy)
24657 {
24658 const char *msg = 0;
24659
24660 if (! S_IS_DEFINED (fixP->fx_addsy))
24661 msg = _("undefined symbol %s used as an immediate value");
24662 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
24663 msg = _("symbol %s is in a different section");
24664 else if (S_IS_WEAK (fixP->fx_addsy))
24665 msg = _("symbol %s is weak and may be overridden later");
24666
24667 if (msg)
24668 {
24669 as_bad_where (fixP->fx_file, fixP->fx_line,
24670 msg, S_GET_NAME (fixP->fx_addsy));
24671 break;
24672 }
24673 }
24674
24675 newimm = encode_arm_immediate (value);
24676 temp = md_chars_to_number (buf, INSN_SIZE);
24677
24678 /* If the instruction will fail, see if we can fix things up by
24679 changing the opcode. */
24680 if (newimm == (unsigned int) FAIL
24681 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
24682 {
24683 /* No ? OK - try using two ADD instructions to generate
24684 the value. */
24685 newimm = validate_immediate_twopart (value, & highpart);
24686
24687 /* Yes - then make sure that the second instruction is
24688 also an add. */
24689 if (newimm != (unsigned int) FAIL)
24690 newinsn = temp;
24691 /* Still No ? Try using a negated value. */
24692 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
24693 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
24694 /* Otherwise - give up. */
24695 else
24696 {
24697 as_bad_where (fixP->fx_file, fixP->fx_line,
24698 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
24699 (long) value);
24700 break;
24701 }
24702
24703 /* Replace the first operand in the 2nd instruction (which
24704 is the PC) with the destination register. We have
24705 already added in the PC in the first instruction and we
24706 do not want to do it again. */
24707 newinsn &= ~ 0xf0000;
24708 newinsn |= ((newinsn & 0x0f000) << 4);
24709 }
24710
24711 newimm |= (temp & 0xfffff000);
24712 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
24713
24714 highpart |= (newinsn & 0xfffff000);
24715 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
24716 }
24717 break;
24718
24719 case BFD_RELOC_ARM_OFFSET_IMM:
24720 if (!fixP->fx_done && seg->use_rela_p)
24721 value = 0;
24722 /* Fall through. */
24723
24724 case BFD_RELOC_ARM_LITERAL:
24725 sign = value > 0;
24726
24727 if (value < 0)
24728 value = - value;
24729
24730 if (validate_offset_imm (value, 0) == FAIL)
24731 {
24732 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
24733 as_bad_where (fixP->fx_file, fixP->fx_line,
24734 _("invalid literal constant: pool needs to be closer"));
24735 else
24736 as_bad_where (fixP->fx_file, fixP->fx_line,
24737 _("bad immediate value for offset (%ld)"),
24738 (long) value);
24739 break;
24740 }
24741
24742 newval = md_chars_to_number (buf, INSN_SIZE);
24743 if (value == 0)
24744 newval &= 0xfffff000;
24745 else
24746 {
24747 newval &= 0xff7ff000;
24748 newval |= value | (sign ? INDEX_UP : 0);
24749 }
24750 md_number_to_chars (buf, newval, INSN_SIZE);
24751 break;
24752
24753 case BFD_RELOC_ARM_OFFSET_IMM8:
24754 case BFD_RELOC_ARM_HWLITERAL:
24755 sign = value > 0;
24756
24757 if (value < 0)
24758 value = - value;
24759
24760 if (validate_offset_imm (value, 1) == FAIL)
24761 {
24762 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
24763 as_bad_where (fixP->fx_file, fixP->fx_line,
24764 _("invalid literal constant: pool needs to be closer"));
24765 else
24766 as_bad_where (fixP->fx_file, fixP->fx_line,
24767 _("bad immediate value for 8-bit offset (%ld)"),
24768 (long) value);
24769 break;
24770 }
24771
24772 newval = md_chars_to_number (buf, INSN_SIZE);
24773 if (value == 0)
24774 newval &= 0xfffff0f0;
24775 else
24776 {
24777 newval &= 0xff7ff0f0;
24778 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
24779 }
24780 md_number_to_chars (buf, newval, INSN_SIZE);
24781 break;
24782
24783 case BFD_RELOC_ARM_T32_OFFSET_U8:
24784 if (value < 0 || value > 1020 || value % 4 != 0)
24785 as_bad_where (fixP->fx_file, fixP->fx_line,
24786 _("bad immediate value for offset (%ld)"), (long) value);
24787 value /= 4;
24788
24789 newval = md_chars_to_number (buf+2, THUMB_SIZE);
24790 newval |= value;
24791 md_number_to_chars (buf+2, newval, THUMB_SIZE);
24792 break;
24793
24794 case BFD_RELOC_ARM_T32_OFFSET_IMM:
24795 /* This is a complicated relocation used for all varieties of Thumb32
24796 load/store instruction with immediate offset:
24797
24798 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
24799 *4, optional writeback(W)
24800 (doubleword load/store)
24801
24802 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
24803 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
24804 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
24805 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
24806 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
24807
24808 Uppercase letters indicate bits that are already encoded at
24809 this point. Lowercase letters are our problem. For the
24810 second block of instructions, the secondary opcode nybble
24811 (bits 8..11) is present, and bit 23 is zero, even if this is
24812 a PC-relative operation. */
24813 newval = md_chars_to_number (buf, THUMB_SIZE);
24814 newval <<= 16;
24815 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
24816
24817 if ((newval & 0xf0000000) == 0xe0000000)
24818 {
24819 /* Doubleword load/store: 8-bit offset, scaled by 4. */
24820 if (value >= 0)
24821 newval |= (1 << 23);
24822 else
24823 value = -value;
24824 if (value % 4 != 0)
24825 {
24826 as_bad_where (fixP->fx_file, fixP->fx_line,
24827 _("offset not a multiple of 4"));
24828 break;
24829 }
24830 value /= 4;
24831 if (value > 0xff)
24832 {
24833 as_bad_where (fixP->fx_file, fixP->fx_line,
24834 _("offset out of range"));
24835 break;
24836 }
24837 newval &= ~0xff;
24838 }
24839 else if ((newval & 0x000f0000) == 0x000f0000)
24840 {
24841 /* PC-relative, 12-bit offset. */
24842 if (value >= 0)
24843 newval |= (1 << 23);
24844 else
24845 value = -value;
24846 if (value > 0xfff)
24847 {
24848 as_bad_where (fixP->fx_file, fixP->fx_line,
24849 _("offset out of range"));
24850 break;
24851 }
24852 newval &= ~0xfff;
24853 }
24854 else if ((newval & 0x00000100) == 0x00000100)
24855 {
24856 /* Writeback: 8-bit, +/- offset. */
24857 if (value >= 0)
24858 newval |= (1 << 9);
24859 else
24860 value = -value;
24861 if (value > 0xff)
24862 {
24863 as_bad_where (fixP->fx_file, fixP->fx_line,
24864 _("offset out of range"));
24865 break;
24866 }
24867 newval &= ~0xff;
24868 }
24869 else if ((newval & 0x00000f00) == 0x00000e00)
24870 {
24871 /* T-instruction: positive 8-bit offset. */
24872 if (value < 0 || value > 0xff)
24873 {
24874 as_bad_where (fixP->fx_file, fixP->fx_line,
24875 _("offset out of range"));
24876 break;
24877 }
24878 newval &= ~0xff;
24879 newval |= value;
24880 }
24881 else
24882 {
24883 /* Positive 12-bit or negative 8-bit offset. */
24884 int limit;
24885 if (value >= 0)
24886 {
24887 newval |= (1 << 23);
24888 limit = 0xfff;
24889 }
24890 else
24891 {
24892 value = -value;
24893 limit = 0xff;
24894 }
24895 if (value > limit)
24896 {
24897 as_bad_where (fixP->fx_file, fixP->fx_line,
24898 _("offset out of range"));
24899 break;
24900 }
24901 newval &= ~limit;
24902 }
24903
24904 newval |= value;
24905 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
24906 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
24907 break;
24908
24909 case BFD_RELOC_ARM_SHIFT_IMM:
24910 newval = md_chars_to_number (buf, INSN_SIZE);
24911 if (((unsigned long) value) > 32
24912 || (value == 32
24913 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
24914 {
24915 as_bad_where (fixP->fx_file, fixP->fx_line,
24916 _("shift expression is too large"));
24917 break;
24918 }
24919
24920 if (value == 0)
24921 /* Shifts of zero must be done as lsl. */
24922 newval &= ~0x60;
24923 else if (value == 32)
24924 value = 0;
24925 newval &= 0xfffff07f;
24926 newval |= (value & 0x1f) << 7;
24927 md_number_to_chars (buf, newval, INSN_SIZE);
24928 break;
24929
24930 case BFD_RELOC_ARM_T32_IMMEDIATE:
24931 case BFD_RELOC_ARM_T32_ADD_IMM:
24932 case BFD_RELOC_ARM_T32_IMM12:
24933 case BFD_RELOC_ARM_T32_ADD_PC12:
24934 /* We claim that this fixup has been processed here,
24935 even if in fact we generate an error because we do
24936 not have a reloc for it, so tc_gen_reloc will reject it. */
24937 fixP->fx_done = 1;
24938
24939 if (fixP->fx_addsy
24940 && ! S_IS_DEFINED (fixP->fx_addsy))
24941 {
24942 as_bad_where (fixP->fx_file, fixP->fx_line,
24943 _("undefined symbol %s used as an immediate value"),
24944 S_GET_NAME (fixP->fx_addsy));
24945 break;
24946 }
24947
24948 newval = md_chars_to_number (buf, THUMB_SIZE);
24949 newval <<= 16;
24950 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
24951
24952 newimm = FAIL;
24953 if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
24954 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
24955 Thumb2 modified immediate encoding (T2). */
24956 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
24957 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
24958 {
24959 newimm = encode_thumb32_immediate (value);
24960 if (newimm == (unsigned int) FAIL)
24961 newimm = thumb32_negate_data_op (&newval, value);
24962 }
24963 if (newimm == (unsigned int) FAIL)
24964 {
24965 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
24966 {
24967 /* Turn add/sum into addw/subw. */
24968 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
24969 newval = (newval & 0xfeffffff) | 0x02000000;
24970 /* No flat 12-bit imm encoding for addsw/subsw. */
24971 if ((newval & 0x00100000) == 0)
24972 {
24973 /* 12 bit immediate for addw/subw. */
24974 if (value < 0)
24975 {
24976 value = -value;
24977 newval ^= 0x00a00000;
24978 }
24979 if (value > 0xfff)
24980 newimm = (unsigned int) FAIL;
24981 else
24982 newimm = value;
24983 }
24984 }
24985 else
24986 {
24987 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
24988 UINT16 (T3 encoding), MOVW only accepts UINT16. When
24989 disassembling, MOV is preferred when there is no encoding
24990 overlap. */
24991 if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
24992 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
24993 but with the Rn field [19:16] set to 1111. */
24994 && (((newval >> 16) & 0xf) == 0xf)
24995 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
24996 && !((newval >> T2_SBIT_SHIFT) & 0x1)
24997 && value >= 0 && value <= 0xffff)
24998 {
24999 /* Toggle bit[25] to change encoding from T2 to T3. */
25000 newval ^= 1 << 25;
25001 /* Clear bits[19:16]. */
25002 newval &= 0xfff0ffff;
25003 /* Encoding high 4bits imm. Code below will encode the
25004 remaining low 12bits. */
25005 newval |= (value & 0x0000f000) << 4;
25006 newimm = value & 0x00000fff;
25007 }
25008 }
25009 }
25010
25011 if (newimm == (unsigned int)FAIL)
25012 {
25013 as_bad_where (fixP->fx_file, fixP->fx_line,
25014 _("invalid constant (%lx) after fixup"),
25015 (unsigned long) value);
25016 break;
25017 }
25018
25019 newval |= (newimm & 0x800) << 15;
25020 newval |= (newimm & 0x700) << 4;
25021 newval |= (newimm & 0x0ff);
25022
25023 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
25024 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
25025 break;
25026
25027 case BFD_RELOC_ARM_SMC:
25028 if (((unsigned long) value) > 0xffff)
25029 as_bad_where (fixP->fx_file, fixP->fx_line,
25030 _("invalid smc expression"));
25031 newval = md_chars_to_number (buf, INSN_SIZE);
25032 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
25033 md_number_to_chars (buf, newval, INSN_SIZE);
25034 break;
25035
25036 case BFD_RELOC_ARM_HVC:
25037 if (((unsigned long) value) > 0xffff)
25038 as_bad_where (fixP->fx_file, fixP->fx_line,
25039 _("invalid hvc expression"));
25040 newval = md_chars_to_number (buf, INSN_SIZE);
25041 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
25042 md_number_to_chars (buf, newval, INSN_SIZE);
25043 break;
25044
25045 case BFD_RELOC_ARM_SWI:
25046 if (fixP->tc_fix_data != 0)
25047 {
25048 if (((unsigned long) value) > 0xff)
25049 as_bad_where (fixP->fx_file, fixP->fx_line,
25050 _("invalid swi expression"));
25051 newval = md_chars_to_number (buf, THUMB_SIZE);
25052 newval |= value;
25053 md_number_to_chars (buf, newval, THUMB_SIZE);
25054 }
25055 else
25056 {
25057 if (((unsigned long) value) > 0x00ffffff)
25058 as_bad_where (fixP->fx_file, fixP->fx_line,
25059 _("invalid swi expression"));
25060 newval = md_chars_to_number (buf, INSN_SIZE);
25061 newval |= value;
25062 md_number_to_chars (buf, newval, INSN_SIZE);
25063 }
25064 break;
25065
25066 case BFD_RELOC_ARM_MULTI:
25067 if (((unsigned long) value) > 0xffff)
25068 as_bad_where (fixP->fx_file, fixP->fx_line,
25069 _("invalid expression in load/store multiple"));
25070 newval = value | md_chars_to_number (buf, INSN_SIZE);
25071 md_number_to_chars (buf, newval, INSN_SIZE);
25072 break;
25073
25074 #ifdef OBJ_ELF
25075 case BFD_RELOC_ARM_PCREL_CALL:
25076
25077 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25078 && fixP->fx_addsy
25079 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25080 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25081 && THUMB_IS_FUNC (fixP->fx_addsy))
25082 /* Flip the bl to blx. This is a simple flip
25083 bit here because we generate PCREL_CALL for
25084 unconditional bls. */
25085 {
25086 newval = md_chars_to_number (buf, INSN_SIZE);
25087 newval = newval | 0x10000000;
25088 md_number_to_chars (buf, newval, INSN_SIZE);
25089 temp = 1;
25090 fixP->fx_done = 1;
25091 }
25092 else
25093 temp = 3;
25094 goto arm_branch_common;
25095
25096 case BFD_RELOC_ARM_PCREL_JUMP:
25097 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25098 && fixP->fx_addsy
25099 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25100 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25101 && THUMB_IS_FUNC (fixP->fx_addsy))
25102 {
25103 /* This would map to a bl<cond>, b<cond>,
25104 b<always> to a Thumb function. We
25105 need to force a relocation for this particular
25106 case. */
25107 newval = md_chars_to_number (buf, INSN_SIZE);
25108 fixP->fx_done = 0;
25109 }
25110 /* Fall through. */
25111
25112 case BFD_RELOC_ARM_PLT32:
25113 #endif
25114 case BFD_RELOC_ARM_PCREL_BRANCH:
25115 temp = 3;
25116 goto arm_branch_common;
25117
25118 case BFD_RELOC_ARM_PCREL_BLX:
25119
25120 temp = 1;
25121 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25122 && fixP->fx_addsy
25123 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25124 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25125 && ARM_IS_FUNC (fixP->fx_addsy))
25126 {
25127 /* Flip the blx to a bl and warn. */
25128 const char *name = S_GET_NAME (fixP->fx_addsy);
25129 newval = 0xeb000000;
25130 as_warn_where (fixP->fx_file, fixP->fx_line,
25131 _("blx to '%s' an ARM ISA state function changed to bl"),
25132 name);
25133 md_number_to_chars (buf, newval, INSN_SIZE);
25134 temp = 3;
25135 fixP->fx_done = 1;
25136 }
25137
25138 #ifdef OBJ_ELF
25139 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
25140 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
25141 #endif
25142
25143 arm_branch_common:
25144 /* We are going to store value (shifted right by two) in the
25145 instruction, in a 24 bit, signed field. Bits 26 through 32 either
25146 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
25147 also be clear. */
25148 if (value & temp)
25149 as_bad_where (fixP->fx_file, fixP->fx_line,
25150 _("misaligned branch destination"));
25151 if ((value & (offsetT)0xfe000000) != (offsetT)0
25152 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
25153 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25154
25155 if (fixP->fx_done || !seg->use_rela_p)
25156 {
25157 newval = md_chars_to_number (buf, INSN_SIZE);
25158 newval |= (value >> 2) & 0x00ffffff;
25159 /* Set the H bit on BLX instructions. */
25160 if (temp == 1)
25161 {
25162 if (value & 2)
25163 newval |= 0x01000000;
25164 else
25165 newval &= ~0x01000000;
25166 }
25167 md_number_to_chars (buf, newval, INSN_SIZE);
25168 }
25169 break;
25170
25171 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
25172 /* CBZ can only branch forward. */
25173
25174 /* Attempts to use CBZ to branch to the next instruction
25175 (which, strictly speaking, are prohibited) will be turned into
25176 no-ops.
25177
25178 FIXME: It may be better to remove the instruction completely and
25179 perform relaxation. */
25180 if (value == -2)
25181 {
25182 newval = md_chars_to_number (buf, THUMB_SIZE);
25183 newval = 0xbf00; /* NOP encoding T1 */
25184 md_number_to_chars (buf, newval, THUMB_SIZE);
25185 }
25186 else
25187 {
25188 if (value & ~0x7e)
25189 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25190
25191 if (fixP->fx_done || !seg->use_rela_p)
25192 {
25193 newval = md_chars_to_number (buf, THUMB_SIZE);
25194 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
25195 md_number_to_chars (buf, newval, THUMB_SIZE);
25196 }
25197 }
25198 break;
25199
25200 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
25201 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
25202 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25203
25204 if (fixP->fx_done || !seg->use_rela_p)
25205 {
25206 newval = md_chars_to_number (buf, THUMB_SIZE);
25207 newval |= (value & 0x1ff) >> 1;
25208 md_number_to_chars (buf, newval, THUMB_SIZE);
25209 }
25210 break;
25211
25212 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
25213 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
25214 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25215
25216 if (fixP->fx_done || !seg->use_rela_p)
25217 {
25218 newval = md_chars_to_number (buf, THUMB_SIZE);
25219 newval |= (value & 0xfff) >> 1;
25220 md_number_to_chars (buf, newval, THUMB_SIZE);
25221 }
25222 break;
25223
25224 case BFD_RELOC_THUMB_PCREL_BRANCH20:
25225 if (fixP->fx_addsy
25226 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25227 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25228 && ARM_IS_FUNC (fixP->fx_addsy)
25229 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
25230 {
25231 /* Force a relocation for a branch 20 bits wide. */
25232 fixP->fx_done = 0;
25233 }
25234 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
25235 as_bad_where (fixP->fx_file, fixP->fx_line,
25236 _("conditional branch out of range"));
25237
25238 if (fixP->fx_done || !seg->use_rela_p)
25239 {
25240 offsetT newval2;
25241 addressT S, J1, J2, lo, hi;
25242
25243 S = (value & 0x00100000) >> 20;
25244 J2 = (value & 0x00080000) >> 19;
25245 J1 = (value & 0x00040000) >> 18;
25246 hi = (value & 0x0003f000) >> 12;
25247 lo = (value & 0x00000ffe) >> 1;
25248
25249 newval = md_chars_to_number (buf, THUMB_SIZE);
25250 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25251 newval |= (S << 10) | hi;
25252 newval2 |= (J1 << 13) | (J2 << 11) | lo;
25253 md_number_to_chars (buf, newval, THUMB_SIZE);
25254 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
25255 }
25256 break;
25257
25258 case BFD_RELOC_THUMB_PCREL_BLX:
25259 /* If there is a blx from a thumb state function to
25260 another thumb function flip this to a bl and warn
25261 about it. */
25262
25263 if (fixP->fx_addsy
25264 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25265 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25266 && THUMB_IS_FUNC (fixP->fx_addsy))
25267 {
25268 const char *name = S_GET_NAME (fixP->fx_addsy);
25269 as_warn_where (fixP->fx_file, fixP->fx_line,
25270 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
25271 name);
25272 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25273 newval = newval | 0x1000;
25274 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
25275 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
25276 fixP->fx_done = 1;
25277 }
25278
25279
25280 goto thumb_bl_common;
25281
25282 case BFD_RELOC_THUMB_PCREL_BRANCH23:
25283 /* A bl from Thumb state ISA to an internal ARM state function
25284 is converted to a blx. */
25285 if (fixP->fx_addsy
25286 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25287 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25288 && ARM_IS_FUNC (fixP->fx_addsy)
25289 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
25290 {
25291 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25292 newval = newval & ~0x1000;
25293 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
25294 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
25295 fixP->fx_done = 1;
25296 }
25297
25298 thumb_bl_common:
25299
25300 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
25301 /* For a BLX instruction, make sure that the relocation is rounded up
25302 to a word boundary. This follows the semantics of the instruction
25303 which specifies that bit 1 of the target address will come from bit
25304 1 of the base address. */
25305 value = (value + 3) & ~ 3;
25306
25307 #ifdef OBJ_ELF
25308 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
25309 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
25310 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
25311 #endif
25312
25313 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
25314 {
25315 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
25316 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25317 else if ((value & ~0x1ffffff)
25318 && ((value & ~0x1ffffff) != ~0x1ffffff))
25319 as_bad_where (fixP->fx_file, fixP->fx_line,
25320 _("Thumb2 branch out of range"));
25321 }
25322
25323 if (fixP->fx_done || !seg->use_rela_p)
25324 encode_thumb2_b_bl_offset (buf, value);
25325
25326 break;
25327
25328 case BFD_RELOC_THUMB_PCREL_BRANCH25:
25329 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
25330 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25331
25332 if (fixP->fx_done || !seg->use_rela_p)
25333 encode_thumb2_b_bl_offset (buf, value);
25334
25335 break;
25336
25337 case BFD_RELOC_8:
25338 if (fixP->fx_done || !seg->use_rela_p)
25339 *buf = value;
25340 break;
25341
25342 case BFD_RELOC_16:
25343 if (fixP->fx_done || !seg->use_rela_p)
25344 md_number_to_chars (buf, value, 2);
25345 break;
25346
25347 #ifdef OBJ_ELF
25348 case BFD_RELOC_ARM_TLS_CALL:
25349 case BFD_RELOC_ARM_THM_TLS_CALL:
25350 case BFD_RELOC_ARM_TLS_DESCSEQ:
25351 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
25352 case BFD_RELOC_ARM_TLS_GOTDESC:
25353 case BFD_RELOC_ARM_TLS_GD32:
25354 case BFD_RELOC_ARM_TLS_LE32:
25355 case BFD_RELOC_ARM_TLS_IE32:
25356 case BFD_RELOC_ARM_TLS_LDM32:
25357 case BFD_RELOC_ARM_TLS_LDO32:
25358 S_SET_THREAD_LOCAL (fixP->fx_addsy);
25359 break;
25360
25361 /* Same handling as above, but with the arm_fdpic guard. */
25362 case BFD_RELOC_ARM_TLS_GD32_FDPIC:
25363 case BFD_RELOC_ARM_TLS_IE32_FDPIC:
25364 case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
25365 if (arm_fdpic)
25366 {
25367 S_SET_THREAD_LOCAL (fixP->fx_addsy);
25368 }
25369 else
25370 {
25371 as_bad_where (fixP->fx_file, fixP->fx_line,
25372 _("Relocation supported only in FDPIC mode"));
25373 }
25374 break;
25375
25376 case BFD_RELOC_ARM_GOT32:
25377 case BFD_RELOC_ARM_GOTOFF:
25378 break;
25379
25380 case BFD_RELOC_ARM_GOT_PREL:
25381 if (fixP->fx_done || !seg->use_rela_p)
25382 md_number_to_chars (buf, value, 4);
25383 break;
25384
25385 case BFD_RELOC_ARM_TARGET2:
25386 /* TARGET2 is not partial-inplace, so we need to write the
25387 addend here for REL targets, because it won't be written out
25388 during reloc processing later. */
25389 if (fixP->fx_done || !seg->use_rela_p)
25390 md_number_to_chars (buf, fixP->fx_offset, 4);
25391 break;
25392
25393 /* Relocations for FDPIC. */
25394 case BFD_RELOC_ARM_GOTFUNCDESC:
25395 case BFD_RELOC_ARM_GOTOFFFUNCDESC:
25396 case BFD_RELOC_ARM_FUNCDESC:
25397 if (arm_fdpic)
25398 {
25399 if (fixP->fx_done || !seg->use_rela_p)
25400 md_number_to_chars (buf, 0, 4);
25401 }
25402 else
25403 {
25404 as_bad_where (fixP->fx_file, fixP->fx_line,
25405 _("Relocation supported only in FDPIC mode"));
25406 }
25407 break;
25408 #endif
25409
25410 case BFD_RELOC_RVA:
25411 case BFD_RELOC_32:
25412 case BFD_RELOC_ARM_TARGET1:
25413 case BFD_RELOC_ARM_ROSEGREL32:
25414 case BFD_RELOC_ARM_SBREL32:
25415 case BFD_RELOC_32_PCREL:
25416 #ifdef TE_PE
25417 case BFD_RELOC_32_SECREL:
25418 #endif
25419 if (fixP->fx_done || !seg->use_rela_p)
25420 #ifdef TE_WINCE
25421 /* For WinCE we only do this for pcrel fixups. */
25422 if (fixP->fx_done || fixP->fx_pcrel)
25423 #endif
25424 md_number_to_chars (buf, value, 4);
25425 break;
25426
25427 #ifdef OBJ_ELF
25428 case BFD_RELOC_ARM_PREL31:
25429 if (fixP->fx_done || !seg->use_rela_p)
25430 {
25431 newval = md_chars_to_number (buf, 4) & 0x80000000;
25432 if ((value ^ (value >> 1)) & 0x40000000)
25433 {
25434 as_bad_where (fixP->fx_file, fixP->fx_line,
25435 _("rel31 relocation overflow"));
25436 }
25437 newval |= value & 0x7fffffff;
25438 md_number_to_chars (buf, newval, 4);
25439 }
25440 break;
25441 #endif
25442
25443 case BFD_RELOC_ARM_CP_OFF_IMM:
25444 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
25445 case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM:
25446 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
25447 newval = md_chars_to_number (buf, INSN_SIZE);
25448 else
25449 newval = get_thumb32_insn (buf);
25450 if ((newval & 0x0f200f00) == 0x0d000900)
25451 {
25452 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
25453 has permitted values that are multiples of 2, in the range 0
25454 to 510. */
25455 if (value < -510 || value > 510 || (value & 1))
25456 as_bad_where (fixP->fx_file, fixP->fx_line,
25457 _("co-processor offset out of range"));
25458 }
25459 else if ((newval & 0xfe001f80) == 0xec000f80)
25460 {
25461 if (value < -511 || value > 512 || (value & 3))
25462 as_bad_where (fixP->fx_file, fixP->fx_line,
25463 _("co-processor offset out of range"));
25464 }
25465 else if (value < -1023 || value > 1023 || (value & 3))
25466 as_bad_where (fixP->fx_file, fixP->fx_line,
25467 _("co-processor offset out of range"));
25468 cp_off_common:
25469 sign = value > 0;
25470 if (value < 0)
25471 value = -value;
25472 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
25473 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
25474 newval = md_chars_to_number (buf, INSN_SIZE);
25475 else
25476 newval = get_thumb32_insn (buf);
25477 if (value == 0)
25478 {
25479 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
25480 newval &= 0xffffff80;
25481 else
25482 newval &= 0xffffff00;
25483 }
25484 else
25485 {
25486 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
25487 newval &= 0xff7fff80;
25488 else
25489 newval &= 0xff7fff00;
25490 if ((newval & 0x0f200f00) == 0x0d000900)
25491 {
25492 /* This is a fp16 vstr/vldr.
25493
25494 It requires the immediate offset in the instruction is shifted
25495 left by 1 to be a half-word offset.
25496
25497 Here, left shift by 1 first, and later right shift by 2
25498 should get the right offset. */
25499 value <<= 1;
25500 }
25501 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
25502 }
25503 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
25504 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
25505 md_number_to_chars (buf, newval, INSN_SIZE);
25506 else
25507 put_thumb32_insn (buf, newval);
25508 break;
25509
25510 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
25511 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
25512 if (value < -255 || value > 255)
25513 as_bad_where (fixP->fx_file, fixP->fx_line,
25514 _("co-processor offset out of range"));
25515 value *= 4;
25516 goto cp_off_common;
25517
25518 case BFD_RELOC_ARM_THUMB_OFFSET:
25519 newval = md_chars_to_number (buf, THUMB_SIZE);
25520 /* Exactly what ranges, and where the offset is inserted depends
25521 on the type of instruction, we can establish this from the
25522 top 4 bits. */
25523 switch (newval >> 12)
25524 {
25525 case 4: /* PC load. */
25526 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
25527 forced to zero for these loads; md_pcrel_from has already
25528 compensated for this. */
25529 if (value & 3)
25530 as_bad_where (fixP->fx_file, fixP->fx_line,
25531 _("invalid offset, target not word aligned (0x%08lX)"),
25532 (((unsigned long) fixP->fx_frag->fr_address
25533 + (unsigned long) fixP->fx_where) & ~3)
25534 + (unsigned long) value);
25535
25536 if (value & ~0x3fc)
25537 as_bad_where (fixP->fx_file, fixP->fx_line,
25538 _("invalid offset, value too big (0x%08lX)"),
25539 (long) value);
25540
25541 newval |= value >> 2;
25542 break;
25543
25544 case 9: /* SP load/store. */
25545 if (value & ~0x3fc)
25546 as_bad_where (fixP->fx_file, fixP->fx_line,
25547 _("invalid offset, value too big (0x%08lX)"),
25548 (long) value);
25549 newval |= value >> 2;
25550 break;
25551
25552 case 6: /* Word load/store. */
25553 if (value & ~0x7c)
25554 as_bad_where (fixP->fx_file, fixP->fx_line,
25555 _("invalid offset, value too big (0x%08lX)"),
25556 (long) value);
25557 newval |= value << 4; /* 6 - 2. */
25558 break;
25559
25560 case 7: /* Byte load/store. */
25561 if (value & ~0x1f)
25562 as_bad_where (fixP->fx_file, fixP->fx_line,
25563 _("invalid offset, value too big (0x%08lX)"),
25564 (long) value);
25565 newval |= value << 6;
25566 break;
25567
25568 case 8: /* Halfword load/store. */
25569 if (value & ~0x3e)
25570 as_bad_where (fixP->fx_file, fixP->fx_line,
25571 _("invalid offset, value too big (0x%08lX)"),
25572 (long) value);
25573 newval |= value << 5; /* 6 - 1. */
25574 break;
25575
25576 default:
25577 as_bad_where (fixP->fx_file, fixP->fx_line,
25578 "Unable to process relocation for thumb opcode: %lx",
25579 (unsigned long) newval);
25580 break;
25581 }
25582 md_number_to_chars (buf, newval, THUMB_SIZE);
25583 break;
25584
25585 case BFD_RELOC_ARM_THUMB_ADD:
25586 /* This is a complicated relocation, since we use it for all of
25587 the following immediate relocations:
25588
25589 3bit ADD/SUB
25590 8bit ADD/SUB
25591 9bit ADD/SUB SP word-aligned
25592 10bit ADD PC/SP word-aligned
25593
25594 The type of instruction being processed is encoded in the
25595 instruction field:
25596
25597 0x8000 SUB
25598 0x00F0 Rd
25599 0x000F Rs
25600 */
25601 newval = md_chars_to_number (buf, THUMB_SIZE);
25602 {
25603 int rd = (newval >> 4) & 0xf;
25604 int rs = newval & 0xf;
25605 int subtract = !!(newval & 0x8000);
25606
25607 /* Check for HI regs, only very restricted cases allowed:
25608 Adjusting SP, and using PC or SP to get an address. */
25609 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
25610 || (rs > 7 && rs != REG_SP && rs != REG_PC))
25611 as_bad_where (fixP->fx_file, fixP->fx_line,
25612 _("invalid Hi register with immediate"));
25613
25614 /* If value is negative, choose the opposite instruction. */
25615 if (value < 0)
25616 {
25617 value = -value;
25618 subtract = !subtract;
25619 if (value < 0)
25620 as_bad_where (fixP->fx_file, fixP->fx_line,
25621 _("immediate value out of range"));
25622 }
25623
25624 if (rd == REG_SP)
25625 {
25626 if (value & ~0x1fc)
25627 as_bad_where (fixP->fx_file, fixP->fx_line,
25628 _("invalid immediate for stack address calculation"));
25629 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
25630 newval |= value >> 2;
25631 }
25632 else if (rs == REG_PC || rs == REG_SP)
25633 {
25634 /* PR gas/18541. If the addition is for a defined symbol
25635 within range of an ADR instruction then accept it. */
25636 if (subtract
25637 && value == 4
25638 && fixP->fx_addsy != NULL)
25639 {
25640 subtract = 0;
25641
25642 if (! S_IS_DEFINED (fixP->fx_addsy)
25643 || S_GET_SEGMENT (fixP->fx_addsy) != seg
25644 || S_IS_WEAK (fixP->fx_addsy))
25645 {
25646 as_bad_where (fixP->fx_file, fixP->fx_line,
25647 _("address calculation needs a strongly defined nearby symbol"));
25648 }
25649 else
25650 {
25651 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
25652
25653 /* Round up to the next 4-byte boundary. */
25654 if (v & 3)
25655 v = (v + 3) & ~ 3;
25656 else
25657 v += 4;
25658 v = S_GET_VALUE (fixP->fx_addsy) - v;
25659
25660 if (v & ~0x3fc)
25661 {
25662 as_bad_where (fixP->fx_file, fixP->fx_line,
25663 _("symbol too far away"));
25664 }
25665 else
25666 {
25667 fixP->fx_done = 1;
25668 value = v;
25669 }
25670 }
25671 }
25672
25673 if (subtract || value & ~0x3fc)
25674 as_bad_where (fixP->fx_file, fixP->fx_line,
25675 _("invalid immediate for address calculation (value = 0x%08lX)"),
25676 (unsigned long) (subtract ? - value : value));
25677 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
25678 newval |= rd << 8;
25679 newval |= value >> 2;
25680 }
25681 else if (rs == rd)
25682 {
25683 if (value & ~0xff)
25684 as_bad_where (fixP->fx_file, fixP->fx_line,
25685 _("immediate value out of range"));
25686 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
25687 newval |= (rd << 8) | value;
25688 }
25689 else
25690 {
25691 if (value & ~0x7)
25692 as_bad_where (fixP->fx_file, fixP->fx_line,
25693 _("immediate value out of range"));
25694 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
25695 newval |= rd | (rs << 3) | (value << 6);
25696 }
25697 }
25698 md_number_to_chars (buf, newval, THUMB_SIZE);
25699 break;
25700
25701 case BFD_RELOC_ARM_THUMB_IMM:
25702 newval = md_chars_to_number (buf, THUMB_SIZE);
25703 if (value < 0 || value > 255)
25704 as_bad_where (fixP->fx_file, fixP->fx_line,
25705 _("invalid immediate: %ld is out of range"),
25706 (long) value);
25707 newval |= value;
25708 md_number_to_chars (buf, newval, THUMB_SIZE);
25709 break;
25710
25711 case BFD_RELOC_ARM_THUMB_SHIFT:
25712 /* 5bit shift value (0..32). LSL cannot take 32. */
25713 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
25714 temp = newval & 0xf800;
25715 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
25716 as_bad_where (fixP->fx_file, fixP->fx_line,
25717 _("invalid shift value: %ld"), (long) value);
25718 /* Shifts of zero must be encoded as LSL. */
25719 if (value == 0)
25720 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
25721 /* Shifts of 32 are encoded as zero. */
25722 else if (value == 32)
25723 value = 0;
25724 newval |= value << 6;
25725 md_number_to_chars (buf, newval, THUMB_SIZE);
25726 break;
25727
25728 case BFD_RELOC_VTABLE_INHERIT:
25729 case BFD_RELOC_VTABLE_ENTRY:
25730 fixP->fx_done = 0;
25731 return;
25732
25733 case BFD_RELOC_ARM_MOVW:
25734 case BFD_RELOC_ARM_MOVT:
25735 case BFD_RELOC_ARM_THUMB_MOVW:
25736 case BFD_RELOC_ARM_THUMB_MOVT:
25737 if (fixP->fx_done || !seg->use_rela_p)
25738 {
25739 /* REL format relocations are limited to a 16-bit addend. */
25740 if (!fixP->fx_done)
25741 {
25742 if (value < -0x8000 || value > 0x7fff)
25743 as_bad_where (fixP->fx_file, fixP->fx_line,
25744 _("offset out of range"));
25745 }
25746 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
25747 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
25748 {
25749 value >>= 16;
25750 }
25751
25752 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
25753 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
25754 {
25755 newval = get_thumb32_insn (buf);
25756 newval &= 0xfbf08f00;
25757 newval |= (value & 0xf000) << 4;
25758 newval |= (value & 0x0800) << 15;
25759 newval |= (value & 0x0700) << 4;
25760 newval |= (value & 0x00ff);
25761 put_thumb32_insn (buf, newval);
25762 }
25763 else
25764 {
25765 newval = md_chars_to_number (buf, 4);
25766 newval &= 0xfff0f000;
25767 newval |= value & 0x0fff;
25768 newval |= (value & 0xf000) << 4;
25769 md_number_to_chars (buf, newval, 4);
25770 }
25771 }
25772 return;
25773
25774 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
25775 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
25776 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
25777 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
25778 gas_assert (!fixP->fx_done);
25779 {
25780 bfd_vma insn;
25781 bfd_boolean is_mov;
25782 bfd_vma encoded_addend = value;
25783
25784 /* Check that addend can be encoded in instruction. */
25785 if (!seg->use_rela_p && (value < 0 || value > 255))
25786 as_bad_where (fixP->fx_file, fixP->fx_line,
25787 _("the offset 0x%08lX is not representable"),
25788 (unsigned long) encoded_addend);
25789
25790 /* Extract the instruction. */
25791 insn = md_chars_to_number (buf, THUMB_SIZE);
25792 is_mov = (insn & 0xf800) == 0x2000;
25793
25794 /* Encode insn. */
25795 if (is_mov)
25796 {
25797 if (!seg->use_rela_p)
25798 insn |= encoded_addend;
25799 }
25800 else
25801 {
25802 int rd, rs;
25803
25804 /* Extract the instruction. */
25805 /* Encoding is the following
25806 0x8000 SUB
25807 0x00F0 Rd
25808 0x000F Rs
25809 */
25810 /* The following conditions must be true :
25811 - ADD
25812 - Rd == Rs
25813 - Rd <= 7
25814 */
25815 rd = (insn >> 4) & 0xf;
25816 rs = insn & 0xf;
25817 if ((insn & 0x8000) || (rd != rs) || rd > 7)
25818 as_bad_where (fixP->fx_file, fixP->fx_line,
25819 _("Unable to process relocation for thumb opcode: %lx"),
25820 (unsigned long) insn);
25821
25822 /* Encode as ADD immediate8 thumb 1 code. */
25823 insn = 0x3000 | (rd << 8);
25824
25825 /* Place the encoded addend into the first 8 bits of the
25826 instruction. */
25827 if (!seg->use_rela_p)
25828 insn |= encoded_addend;
25829 }
25830
25831 /* Update the instruction. */
25832 md_number_to_chars (buf, insn, THUMB_SIZE);
25833 }
25834 break;
25835
25836 case BFD_RELOC_ARM_ALU_PC_G0_NC:
25837 case BFD_RELOC_ARM_ALU_PC_G0:
25838 case BFD_RELOC_ARM_ALU_PC_G1_NC:
25839 case BFD_RELOC_ARM_ALU_PC_G1:
25840 case BFD_RELOC_ARM_ALU_PC_G2:
25841 case BFD_RELOC_ARM_ALU_SB_G0_NC:
25842 case BFD_RELOC_ARM_ALU_SB_G0:
25843 case BFD_RELOC_ARM_ALU_SB_G1_NC:
25844 case BFD_RELOC_ARM_ALU_SB_G1:
25845 case BFD_RELOC_ARM_ALU_SB_G2:
25846 gas_assert (!fixP->fx_done);
25847 if (!seg->use_rela_p)
25848 {
25849 bfd_vma insn;
25850 bfd_vma encoded_addend;
25851 bfd_vma addend_abs = llabs (value);
25852
25853 /* Check that the absolute value of the addend can be
25854 expressed as an 8-bit constant plus a rotation. */
25855 encoded_addend = encode_arm_immediate (addend_abs);
25856 if (encoded_addend == (unsigned int) FAIL)
25857 as_bad_where (fixP->fx_file, fixP->fx_line,
25858 _("the offset 0x%08lX is not representable"),
25859 (unsigned long) addend_abs);
25860
25861 /* Extract the instruction. */
25862 insn = md_chars_to_number (buf, INSN_SIZE);
25863
25864 /* If the addend is positive, use an ADD instruction.
25865 Otherwise use a SUB. Take care not to destroy the S bit. */
25866 insn &= 0xff1fffff;
25867 if (value < 0)
25868 insn |= 1 << 22;
25869 else
25870 insn |= 1 << 23;
25871
25872 /* Place the encoded addend into the first 12 bits of the
25873 instruction. */
25874 insn &= 0xfffff000;
25875 insn |= encoded_addend;
25876
25877 /* Update the instruction. */
25878 md_number_to_chars (buf, insn, INSN_SIZE);
25879 }
25880 break;
25881
25882 case BFD_RELOC_ARM_LDR_PC_G0:
25883 case BFD_RELOC_ARM_LDR_PC_G1:
25884 case BFD_RELOC_ARM_LDR_PC_G2:
25885 case BFD_RELOC_ARM_LDR_SB_G0:
25886 case BFD_RELOC_ARM_LDR_SB_G1:
25887 case BFD_RELOC_ARM_LDR_SB_G2:
25888 gas_assert (!fixP->fx_done);
25889 if (!seg->use_rela_p)
25890 {
25891 bfd_vma insn;
25892 bfd_vma addend_abs = llabs (value);
25893
25894 /* Check that the absolute value of the addend can be
25895 encoded in 12 bits. */
25896 if (addend_abs >= 0x1000)
25897 as_bad_where (fixP->fx_file, fixP->fx_line,
25898 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
25899 (unsigned long) addend_abs);
25900
25901 /* Extract the instruction. */
25902 insn = md_chars_to_number (buf, INSN_SIZE);
25903
25904 /* If the addend is negative, clear bit 23 of the instruction.
25905 Otherwise set it. */
25906 if (value < 0)
25907 insn &= ~(1 << 23);
25908 else
25909 insn |= 1 << 23;
25910
25911 /* Place the absolute value of the addend into the first 12 bits
25912 of the instruction. */
25913 insn &= 0xfffff000;
25914 insn |= addend_abs;
25915
25916 /* Update the instruction. */
25917 md_number_to_chars (buf, insn, INSN_SIZE);
25918 }
25919 break;
25920
25921 case BFD_RELOC_ARM_LDRS_PC_G0:
25922 case BFD_RELOC_ARM_LDRS_PC_G1:
25923 case BFD_RELOC_ARM_LDRS_PC_G2:
25924 case BFD_RELOC_ARM_LDRS_SB_G0:
25925 case BFD_RELOC_ARM_LDRS_SB_G1:
25926 case BFD_RELOC_ARM_LDRS_SB_G2:
25927 gas_assert (!fixP->fx_done);
25928 if (!seg->use_rela_p)
25929 {
25930 bfd_vma insn;
25931 bfd_vma addend_abs = llabs (value);
25932
25933 /* Check that the absolute value of the addend can be
25934 encoded in 8 bits. */
25935 if (addend_abs >= 0x100)
25936 as_bad_where (fixP->fx_file, fixP->fx_line,
25937 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
25938 (unsigned long) addend_abs);
25939
25940 /* Extract the instruction. */
25941 insn = md_chars_to_number (buf, INSN_SIZE);
25942
25943 /* If the addend is negative, clear bit 23 of the instruction.
25944 Otherwise set it. */
25945 if (value < 0)
25946 insn &= ~(1 << 23);
25947 else
25948 insn |= 1 << 23;
25949
25950 /* Place the first four bits of the absolute value of the addend
25951 into the first 4 bits of the instruction, and the remaining
25952 four into bits 8 .. 11. */
25953 insn &= 0xfffff0f0;
25954 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
25955
25956 /* Update the instruction. */
25957 md_number_to_chars (buf, insn, INSN_SIZE);
25958 }
25959 break;
25960
25961 case BFD_RELOC_ARM_LDC_PC_G0:
25962 case BFD_RELOC_ARM_LDC_PC_G1:
25963 case BFD_RELOC_ARM_LDC_PC_G2:
25964 case BFD_RELOC_ARM_LDC_SB_G0:
25965 case BFD_RELOC_ARM_LDC_SB_G1:
25966 case BFD_RELOC_ARM_LDC_SB_G2:
25967 gas_assert (!fixP->fx_done);
25968 if (!seg->use_rela_p)
25969 {
25970 bfd_vma insn;
25971 bfd_vma addend_abs = llabs (value);
25972
25973 /* Check that the absolute value of the addend is a multiple of
25974 four and, when divided by four, fits in 8 bits. */
25975 if (addend_abs & 0x3)
25976 as_bad_where (fixP->fx_file, fixP->fx_line,
25977 _("bad offset 0x%08lX (must be word-aligned)"),
25978 (unsigned long) addend_abs);
25979
25980 if ((addend_abs >> 2) > 0xff)
25981 as_bad_where (fixP->fx_file, fixP->fx_line,
25982 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
25983 (unsigned long) addend_abs);
25984
25985 /* Extract the instruction. */
25986 insn = md_chars_to_number (buf, INSN_SIZE);
25987
25988 /* If the addend is negative, clear bit 23 of the instruction.
25989 Otherwise set it. */
25990 if (value < 0)
25991 insn &= ~(1 << 23);
25992 else
25993 insn |= 1 << 23;
25994
25995 /* Place the addend (divided by four) into the first eight
25996 bits of the instruction. */
25997 insn &= 0xfffffff0;
25998 insn |= addend_abs >> 2;
25999
26000 /* Update the instruction. */
26001 md_number_to_chars (buf, insn, INSN_SIZE);
26002 }
26003 break;
26004
26005 case BFD_RELOC_THUMB_PCREL_BRANCH5:
26006 if (fixP->fx_addsy
26007 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26008 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26009 && ARM_IS_FUNC (fixP->fx_addsy)
26010 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26011 {
26012 /* Force a relocation for a branch 5 bits wide. */
26013 fixP->fx_done = 0;
26014 }
26015 if (v8_1_branch_value_check (value, 5, FALSE) == FAIL)
26016 as_bad_where (fixP->fx_file, fixP->fx_line,
26017 BAD_BRANCH_OFF);
26018
26019 if (fixP->fx_done || !seg->use_rela_p)
26020 {
26021 addressT boff = value >> 1;
26022
26023 newval = md_chars_to_number (buf, THUMB_SIZE);
26024 newval |= (boff << 7);
26025 md_number_to_chars (buf, newval, THUMB_SIZE);
26026 }
26027 break;
26028
26029 case BFD_RELOC_THUMB_PCREL_BFCSEL:
26030 if (fixP->fx_addsy
26031 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26032 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26033 && ARM_IS_FUNC (fixP->fx_addsy)
26034 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26035 {
26036 fixP->fx_done = 0;
26037 }
26038 if ((value & ~0x7f) && ((value & ~0x3f) != ~0x3f))
26039 as_bad_where (fixP->fx_file, fixP->fx_line,
26040 _("branch out of range"));
26041
26042 if (fixP->fx_done || !seg->use_rela_p)
26043 {
26044 newval = md_chars_to_number (buf, THUMB_SIZE);
26045
26046 addressT boff = ((newval & 0x0780) >> 7) << 1;
26047 addressT diff = value - boff;
26048
26049 if (diff == 4)
26050 {
26051 newval |= 1 << 1; /* T bit. */
26052 }
26053 else if (diff != 2)
26054 {
26055 as_bad_where (fixP->fx_file, fixP->fx_line,
26056 _("out of range label-relative fixup value"));
26057 }
26058 md_number_to_chars (buf, newval, THUMB_SIZE);
26059 }
26060 break;
26061
26062 case BFD_RELOC_ARM_THUMB_BF17:
26063 if (fixP->fx_addsy
26064 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26065 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26066 && ARM_IS_FUNC (fixP->fx_addsy)
26067 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26068 {
26069 /* Force a relocation for a branch 17 bits wide. */
26070 fixP->fx_done = 0;
26071 }
26072
26073 if (v8_1_branch_value_check (value, 17, TRUE) == FAIL)
26074 as_bad_where (fixP->fx_file, fixP->fx_line,
26075 BAD_BRANCH_OFF);
26076
26077 if (fixP->fx_done || !seg->use_rela_p)
26078 {
26079 offsetT newval2;
26080 addressT immA, immB, immC;
26081
26082 immA = (value & 0x0001f000) >> 12;
26083 immB = (value & 0x00000ffc) >> 2;
26084 immC = (value & 0x00000002) >> 1;
26085
26086 newval = md_chars_to_number (buf, THUMB_SIZE);
26087 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
26088 newval |= immA;
26089 newval2 |= (immC << 11) | (immB << 1);
26090 md_number_to_chars (buf, newval, THUMB_SIZE);
26091 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
26092 }
26093 break;
26094
26095 case BFD_RELOC_ARM_THUMB_BF19:
26096 if (fixP->fx_addsy
26097 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26098 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26099 && ARM_IS_FUNC (fixP->fx_addsy)
26100 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26101 {
26102 /* Force a relocation for a branch 19 bits wide. */
26103 fixP->fx_done = 0;
26104 }
26105
26106 if (v8_1_branch_value_check (value, 19, TRUE) == FAIL)
26107 as_bad_where (fixP->fx_file, fixP->fx_line,
26108 BAD_BRANCH_OFF);
26109
26110 if (fixP->fx_done || !seg->use_rela_p)
26111 {
26112 offsetT newval2;
26113 addressT immA, immB, immC;
26114
26115 immA = (value & 0x0007f000) >> 12;
26116 immB = (value & 0x00000ffc) >> 2;
26117 immC = (value & 0x00000002) >> 1;
26118
26119 newval = md_chars_to_number (buf, THUMB_SIZE);
26120 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
26121 newval |= immA;
26122 newval2 |= (immC << 11) | (immB << 1);
26123 md_number_to_chars (buf, newval, THUMB_SIZE);
26124 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
26125 }
26126 break;
26127
26128 case BFD_RELOC_ARM_THUMB_BF13:
26129 if (fixP->fx_addsy
26130 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26131 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26132 && ARM_IS_FUNC (fixP->fx_addsy)
26133 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26134 {
26135 /* Force a relocation for a branch 13 bits wide. */
26136 fixP->fx_done = 0;
26137 }
26138
26139 if (v8_1_branch_value_check (value, 13, TRUE) == FAIL)
26140 as_bad_where (fixP->fx_file, fixP->fx_line,
26141 BAD_BRANCH_OFF);
26142
26143 if (fixP->fx_done || !seg->use_rela_p)
26144 {
26145 offsetT newval2;
26146 addressT immA, immB, immC;
26147
26148 immA = (value & 0x00001000) >> 12;
26149 immB = (value & 0x00000ffc) >> 2;
26150 immC = (value & 0x00000002) >> 1;
26151
26152 newval = md_chars_to_number (buf, THUMB_SIZE);
26153 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
26154 newval |= immA;
26155 newval2 |= (immC << 11) | (immB << 1);
26156 md_number_to_chars (buf, newval, THUMB_SIZE);
26157 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
26158 }
26159 break;
26160
26161 case BFD_RELOC_ARM_THUMB_LOOP12:
26162 if (fixP->fx_addsy
26163 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26164 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26165 && ARM_IS_FUNC (fixP->fx_addsy)
26166 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26167 {
26168 /* Force a relocation for a branch 12 bits wide. */
26169 fixP->fx_done = 0;
26170 }
26171
26172 bfd_vma insn = get_thumb32_insn (buf);
26173 /* le lr, <label> or le <label> */
26174 if (((insn & 0xffffffff) == 0xf00fc001)
26175 || ((insn & 0xffffffff) == 0xf02fc001))
26176 value = -value;
26177
26178 if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
26179 as_bad_where (fixP->fx_file, fixP->fx_line,
26180 BAD_BRANCH_OFF);
26181 if (fixP->fx_done || !seg->use_rela_p)
26182 {
26183 addressT imml, immh;
26184
26185 immh = (value & 0x00000ffc) >> 2;
26186 imml = (value & 0x00000002) >> 1;
26187
26188 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
26189 newval |= (imml << 11) | (immh << 1);
26190 md_number_to_chars (buf + THUMB_SIZE, newval, THUMB_SIZE);
26191 }
26192 break;
26193
26194 case BFD_RELOC_ARM_V4BX:
26195 /* This will need to go in the object file. */
26196 fixP->fx_done = 0;
26197 break;
26198
26199 case BFD_RELOC_UNUSED:
26200 default:
26201 as_bad_where (fixP->fx_file, fixP->fx_line,
26202 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
26203 }
26204 }
26205
26206 /* Translate internal representation of relocation info to BFD target
26207 format. */
26208
26209 arelent *
26210 tc_gen_reloc (asection *section, fixS *fixp)
26211 {
26212 arelent * reloc;
26213 bfd_reloc_code_real_type code;
26214
26215 reloc = XNEW (arelent);
26216
26217 reloc->sym_ptr_ptr = XNEW (asymbol *);
26218 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
26219 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
26220
26221 if (fixp->fx_pcrel)
26222 {
26223 if (section->use_rela_p)
26224 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
26225 else
26226 fixp->fx_offset = reloc->address;
26227 }
26228 reloc->addend = fixp->fx_offset;
26229
26230 switch (fixp->fx_r_type)
26231 {
26232 case BFD_RELOC_8:
26233 if (fixp->fx_pcrel)
26234 {
26235 code = BFD_RELOC_8_PCREL;
26236 break;
26237 }
26238 /* Fall through. */
26239
26240 case BFD_RELOC_16:
26241 if (fixp->fx_pcrel)
26242 {
26243 code = BFD_RELOC_16_PCREL;
26244 break;
26245 }
26246 /* Fall through. */
26247
26248 case BFD_RELOC_32:
26249 if (fixp->fx_pcrel)
26250 {
26251 code = BFD_RELOC_32_PCREL;
26252 break;
26253 }
26254 /* Fall through. */
26255
26256 case BFD_RELOC_ARM_MOVW:
26257 if (fixp->fx_pcrel)
26258 {
26259 code = BFD_RELOC_ARM_MOVW_PCREL;
26260 break;
26261 }
26262 /* Fall through. */
26263
26264 case BFD_RELOC_ARM_MOVT:
26265 if (fixp->fx_pcrel)
26266 {
26267 code = BFD_RELOC_ARM_MOVT_PCREL;
26268 break;
26269 }
26270 /* Fall through. */
26271
26272 case BFD_RELOC_ARM_THUMB_MOVW:
26273 if (fixp->fx_pcrel)
26274 {
26275 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
26276 break;
26277 }
26278 /* Fall through. */
26279
26280 case BFD_RELOC_ARM_THUMB_MOVT:
26281 if (fixp->fx_pcrel)
26282 {
26283 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
26284 break;
26285 }
26286 /* Fall through. */
26287
26288 case BFD_RELOC_NONE:
26289 case BFD_RELOC_ARM_PCREL_BRANCH:
26290 case BFD_RELOC_ARM_PCREL_BLX:
26291 case BFD_RELOC_RVA:
26292 case BFD_RELOC_THUMB_PCREL_BRANCH7:
26293 case BFD_RELOC_THUMB_PCREL_BRANCH9:
26294 case BFD_RELOC_THUMB_PCREL_BRANCH12:
26295 case BFD_RELOC_THUMB_PCREL_BRANCH20:
26296 case BFD_RELOC_THUMB_PCREL_BRANCH23:
26297 case BFD_RELOC_THUMB_PCREL_BRANCH25:
26298 case BFD_RELOC_VTABLE_ENTRY:
26299 case BFD_RELOC_VTABLE_INHERIT:
26300 #ifdef TE_PE
26301 case BFD_RELOC_32_SECREL:
26302 #endif
26303 code = fixp->fx_r_type;
26304 break;
26305
26306 case BFD_RELOC_THUMB_PCREL_BLX:
26307 #ifdef OBJ_ELF
26308 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
26309 code = BFD_RELOC_THUMB_PCREL_BRANCH23;
26310 else
26311 #endif
26312 code = BFD_RELOC_THUMB_PCREL_BLX;
26313 break;
26314
26315 case BFD_RELOC_ARM_LITERAL:
26316 case BFD_RELOC_ARM_HWLITERAL:
26317 	/* If this is called then a literal has
26318 	   been referenced across a section boundary.  */
26319 as_bad_where (fixp->fx_file, fixp->fx_line,
26320 _("literal referenced across section boundary"));
26321 return NULL;
26322
26323 #ifdef OBJ_ELF
26324 case BFD_RELOC_ARM_TLS_CALL:
26325 case BFD_RELOC_ARM_THM_TLS_CALL:
26326 case BFD_RELOC_ARM_TLS_DESCSEQ:
26327 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
26328 case BFD_RELOC_ARM_GOT32:
26329 case BFD_RELOC_ARM_GOTOFF:
26330 case BFD_RELOC_ARM_GOT_PREL:
26331 case BFD_RELOC_ARM_PLT32:
26332 case BFD_RELOC_ARM_TARGET1:
26333 case BFD_RELOC_ARM_ROSEGREL32:
26334 case BFD_RELOC_ARM_SBREL32:
26335 case BFD_RELOC_ARM_PREL31:
26336 case BFD_RELOC_ARM_TARGET2:
26337 case BFD_RELOC_ARM_TLS_LDO32:
26338 case BFD_RELOC_ARM_PCREL_CALL:
26339 case BFD_RELOC_ARM_PCREL_JUMP:
26340 case BFD_RELOC_ARM_ALU_PC_G0_NC:
26341 case BFD_RELOC_ARM_ALU_PC_G0:
26342 case BFD_RELOC_ARM_ALU_PC_G1_NC:
26343 case BFD_RELOC_ARM_ALU_PC_G1:
26344 case BFD_RELOC_ARM_ALU_PC_G2:
26345 case BFD_RELOC_ARM_LDR_PC_G0:
26346 case BFD_RELOC_ARM_LDR_PC_G1:
26347 case BFD_RELOC_ARM_LDR_PC_G2:
26348 case BFD_RELOC_ARM_LDRS_PC_G0:
26349 case BFD_RELOC_ARM_LDRS_PC_G1:
26350 case BFD_RELOC_ARM_LDRS_PC_G2:
26351 case BFD_RELOC_ARM_LDC_PC_G0:
26352 case BFD_RELOC_ARM_LDC_PC_G1:
26353 case BFD_RELOC_ARM_LDC_PC_G2:
26354 case BFD_RELOC_ARM_ALU_SB_G0_NC:
26355 case BFD_RELOC_ARM_ALU_SB_G0:
26356 case BFD_RELOC_ARM_ALU_SB_G1_NC:
26357 case BFD_RELOC_ARM_ALU_SB_G1:
26358 case BFD_RELOC_ARM_ALU_SB_G2:
26359 case BFD_RELOC_ARM_LDR_SB_G0:
26360 case BFD_RELOC_ARM_LDR_SB_G1:
26361 case BFD_RELOC_ARM_LDR_SB_G2:
26362 case BFD_RELOC_ARM_LDRS_SB_G0:
26363 case BFD_RELOC_ARM_LDRS_SB_G1:
26364 case BFD_RELOC_ARM_LDRS_SB_G2:
26365 case BFD_RELOC_ARM_LDC_SB_G0:
26366 case BFD_RELOC_ARM_LDC_SB_G1:
26367 case BFD_RELOC_ARM_LDC_SB_G2:
26368 case BFD_RELOC_ARM_V4BX:
26369 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
26370 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
26371 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
26372 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
26373 case BFD_RELOC_ARM_GOTFUNCDESC:
26374 case BFD_RELOC_ARM_GOTOFFFUNCDESC:
26375 case BFD_RELOC_ARM_FUNCDESC:
26376 case BFD_RELOC_ARM_THUMB_BF17:
26377 case BFD_RELOC_ARM_THUMB_BF19:
26378 case BFD_RELOC_ARM_THUMB_BF13:
26379 code = fixp->fx_r_type;
26380 break;
26381
26382 case BFD_RELOC_ARM_TLS_GOTDESC:
26383 case BFD_RELOC_ARM_TLS_GD32:
26384 case BFD_RELOC_ARM_TLS_GD32_FDPIC:
26385 case BFD_RELOC_ARM_TLS_LE32:
26386 case BFD_RELOC_ARM_TLS_IE32:
26387 case BFD_RELOC_ARM_TLS_IE32_FDPIC:
26388 case BFD_RELOC_ARM_TLS_LDM32:
26389 case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
26390 /* BFD will include the symbol's address in the addend.
26391 But we don't want that, so subtract it out again here. */
26392 if (!S_IS_COMMON (fixp->fx_addsy))
26393 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
26394 code = fixp->fx_r_type;
26395 break;
26396 #endif
26397
26398 case BFD_RELOC_ARM_IMMEDIATE:
26399 as_bad_where (fixp->fx_file, fixp->fx_line,
26400 _("internal relocation (type: IMMEDIATE) not fixed up"));
26401 return NULL;
26402
26403 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
26404 as_bad_where (fixp->fx_file, fixp->fx_line,
26405 _("ADRL used for a symbol not defined in the same file"));
26406 return NULL;
26407
26408 case BFD_RELOC_THUMB_PCREL_BRANCH5:
26409 case BFD_RELOC_THUMB_PCREL_BFCSEL:
26410 case BFD_RELOC_ARM_THUMB_LOOP12:
26411 as_bad_where (fixp->fx_file, fixp->fx_line,
26412 _("%s used for a symbol not defined in the same file"),
26413 bfd_get_reloc_code_name (fixp->fx_r_type));
26414 return NULL;
26415
26416 case BFD_RELOC_ARM_OFFSET_IMM:
26417 if (section->use_rela_p)
26418 {
26419 code = fixp->fx_r_type;
26420 break;
26421 }
26422
26423 if (fixp->fx_addsy != NULL
26424 && !S_IS_DEFINED (fixp->fx_addsy)
26425 && S_IS_LOCAL (fixp->fx_addsy))
26426 {
26427 as_bad_where (fixp->fx_file, fixp->fx_line,
26428 _("undefined local label `%s'"),
26429 S_GET_NAME (fixp->fx_addsy));
26430 return NULL;
26431 }
26432
26433 as_bad_where (fixp->fx_file, fixp->fx_line,
26434 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
26435 return NULL;
26436
26437 default:
26438 {
26439 const char * type;
26440
26441 switch (fixp->fx_r_type)
26442 {
26443 case BFD_RELOC_NONE: type = "NONE"; break;
26444 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
26445 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
26446 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
26447 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
26448 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
26449 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
26450 case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
26451 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
26452 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
26453 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
26454 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
26455 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
26456 default: type = _("<unknown>"); break;
26457 }
26458 as_bad_where (fixp->fx_file, fixp->fx_line,
26459 _("cannot represent %s relocation in this object file format"),
26460 type);
26461 return NULL;
26462 }
26463 }
26464
26465 #ifdef OBJ_ELF
26466 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
26467 && GOT_symbol
26468 && fixp->fx_addsy == GOT_symbol)
26469 {
26470 code = BFD_RELOC_ARM_GOTPC;
26471 reloc->addend = fixp->fx_offset = reloc->address;
26472 }
26473 #endif
26474
26475 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
26476
26477 if (reloc->howto == NULL)
26478 {
26479 as_bad_where (fixp->fx_file, fixp->fx_line,
26480 _("cannot represent %s relocation in this object file format"),
26481 bfd_get_reloc_code_name (code));
26482 return NULL;
26483 }
26484
26485 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
26486 vtable entry to be used in the relocation's section offset. */
26487 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
26488 reloc->address = fixp->fx_offset;
26489
26490 return reloc;
26491 }
26492
26493 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
26494
26495 void
26496 cons_fix_new_arm (fragS * frag,
26497 int where,
26498 int size,
26499 expressionS * exp,
26500 bfd_reloc_code_real_type reloc)
26501 {
26502 int pcrel = 0;
26503
26504 /* Pick a reloc.
26505 FIXME: @@ Should look at CPU word size. */
26506 switch (size)
26507 {
26508 case 1:
26509 reloc = BFD_RELOC_8;
26510 break;
26511 case 2:
26512 reloc = BFD_RELOC_16;
26513 break;
26514 case 4:
26515 default:
26516 reloc = BFD_RELOC_32;
26517 break;
26518 case 8:
26519 reloc = BFD_RELOC_64;
26520 break;
26521 }
26522
26523 #ifdef TE_PE
26524 if (exp->X_op == O_secrel)
26525 {
26526 exp->X_op = O_symbol;
26527 reloc = BFD_RELOC_32_SECREL;
26528 }
26529 #endif
26530
26531 fix_new_exp (frag, where, size, exp, pcrel, reloc);
26532 }
26533
#if defined (OBJ_COFF)
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (fixP->fx_r_type != BFD_RELOC_THUMB_PCREL_BRANCH23
      || fixP->fx_addsy == NULL
      || ! S_IS_DEFINED (fixP->fx_addsy)
      || THUMB_IS_FUNC (fixP->fx_addsy))
    return;

  fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
26551
26552
26553 int
26554 arm_force_relocation (struct fix * fixp)
26555 {
26556 #if defined (OBJ_COFF) && defined (TE_PE)
26557 if (fixp->fx_r_type == BFD_RELOC_RVA)
26558 return 1;
26559 #endif
26560
26561 /* In case we have a call or a branch to a function in ARM ISA mode from
26562 a thumb function or vice-versa force the relocation. These relocations
26563 are cleared off for some cores that might have blx and simple transformations
26564 are possible. */
26565
26566 #ifdef OBJ_ELF
26567 switch (fixp->fx_r_type)
26568 {
26569 case BFD_RELOC_ARM_PCREL_JUMP:
26570 case BFD_RELOC_ARM_PCREL_CALL:
26571 case BFD_RELOC_THUMB_PCREL_BLX:
26572 if (THUMB_IS_FUNC (fixp->fx_addsy))
26573 return 1;
26574 break;
26575
26576 case BFD_RELOC_ARM_PCREL_BLX:
26577 case BFD_RELOC_THUMB_PCREL_BRANCH25:
26578 case BFD_RELOC_THUMB_PCREL_BRANCH20:
26579 case BFD_RELOC_THUMB_PCREL_BRANCH23:
26580 if (ARM_IS_FUNC (fixp->fx_addsy))
26581 return 1;
26582 break;
26583
26584 default:
26585 break;
26586 }
26587 #endif
26588
26589 /* Resolve these relocations even if the symbol is extern or weak.
26590 Technically this is probably wrong due to symbol preemption.
26591 In practice these relocations do not have enough range to be useful
26592 at dynamic link time, and some code (e.g. in the Linux kernel)
26593 expects these references to be resolved. */
26594 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
26595 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
26596 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
26597 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
26598 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
26599 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
26600 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
26601 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
26602 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
26603 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
26604 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
26605 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
26606 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
26607 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
26608 return 0;
26609
26610 /* Always leave these relocations for the linker. */
26611 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
26612 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
26613 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
26614 return 1;
26615
26616 /* Always generate relocations against function symbols. */
26617 if (fixp->fx_r_type == BFD_RELOC_32
26618 && fixp->fx_addsy
26619 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
26620 return 1;
26621
26622 return generic_force_reloc (fixp);
26623 }
26624
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Return TRUE if the fix FIXP may be adjusted to be section-relative,
   FALSE if the original symbol must be kept in the relocation.

   Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   FIXME:
   There is one other problem that ought to be addressed here, but
   which currently is not:  Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.  */

bfd_boolean
arm_fix_adjustable (fixS * fixP)
{
  /* A fix with no symbol attached can always be adjusted.
     (Was "return 1"; spelled TRUE for consistency with the other
     bfd_boolean returns below -- same value, clearer intent.)  */
  if (fixP->fx_addsy == NULL)
    return TRUE;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return FALSE;

  /* Likewise for Thumb entry points, unless this is a difference
     expression (fx_subsy set).  */
  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return FALSE;

  /* We need the symbol name for the VTABLE entries.  */
  if (   fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return FALSE;

  /* Don't allow symbols to be discarded on GOT related relocs.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32_FDPIC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32_FDPIC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32_FDPIC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return FALSE;

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return FALSE;

  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
    return FALSE;

  /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
     offsets, so keep these symbols.  */
  if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
      && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
    return FALSE;

  return TRUE;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
26704
26705 #ifdef OBJ_ELF
26706 const char *
26707 elf32_arm_target_format (void)
26708 {
26709 #ifdef TE_SYMBIAN
26710 return (target_big_endian
26711 ? "elf32-bigarm-symbian"
26712 : "elf32-littlearm-symbian");
26713 #elif defined (TE_VXWORKS)
26714 return (target_big_endian
26715 ? "elf32-bigarm-vxworks"
26716 : "elf32-littlearm-vxworks");
26717 #elif defined (TE_NACL)
26718 return (target_big_endian
26719 ? "elf32-bigarm-nacl"
26720 : "elf32-littlearm-nacl");
26721 #else
26722 if (arm_fdpic)
26723 {
26724 if (target_big_endian)
26725 return "elf32-bigarm-fdpic";
26726 else
26727 return "elf32-littlearm-fdpic";
26728 }
26729 else
26730 {
26731 if (target_big_endian)
26732 return "elf32-bigarm";
26733 else
26734 return "elf32-littlearm";
26735 }
26736 #endif
26737 }
26738
/* Symbol-frobbing hook for ARM ELF: simply delegate to the generic
   ELF handler.  */
void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
26745 #endif
26746
26747 /* MD interface: Finalization. */
26748
26749 void
26750 arm_cleanup (void)
26751 {
26752 literal_pool * pool;
26753
26754 /* Ensure that all the predication blocks are properly closed. */
26755 check_pred_blocks_finished ();
26756
26757 for (pool = list_of_pools; pool; pool = pool->next)
26758 {
26759 /* Put it at the end of the relevant section. */
26760 subseg_set (pool->section, pool->sub_section);
26761 #ifdef OBJ_ELF
26762 arm_elf_change_section ();
26763 #endif
26764 s_ltorg (0);
26765 }
26766 }
26767
26768 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Sections without a frag chain have nothing to clean up.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* SYM is the last mapping symbol recorded for this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with NEXT: scan forward over
	 empty frags to decide whether SYM is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
26833 #endif
26834
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* For COFF, Thumb-ness is encoded in the symbol's storage class.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Flag interworking-capable symbols in the COFF native record.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char bind;

  /* For ELF, Thumb-ness goes in the symbol's target-internal data
     (branch type) or, pre-EABIv4, in an STT_ARM_16BIT symbol type.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a, $t, $d, ...) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
26916
26917 /* MD interface: Initialization. */
26918
26919 static void
26920 set_constant_flonums (void)
26921 {
26922 int i;
26923
26924 for (i = 0; i < NUM_FLOAT_VALS; i++)
26925 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
26926 abort ();
26927 }
26928
26929 /* Auto-select Thumb mode if it's the only available instruction set for the
26930 given architecture. */
26931
26932 static void
26933 autoselect_thumb_from_cpu_variant (void)
26934 {
26935 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
26936 opcode_select (16);
26937 }
26938
/* MD interface: one-time assembler initialisation.  Builds the opcode,
   condition, shift, PSR, register, relocation and barrier-option hash
   tables, resolves the CPU/FPU selection from the command line, and
   records the private flags and BFD machine number in the output.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Allocate every lookup table up front; failure is fatal.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_vcond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate the hash tables from the static description tables.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (vconds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_vcond_hsh, vconds[i].template_name, (void *) (vconds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      selected_arch = *legacy_cpu;
    }
  else if (mcpu_cpu_opt)
    {
      selected_arch = *mcpu_cpu_opt;
      selected_ext = *mcpu_ext_opt;
    }
  else if (march_cpu_opt)
    {
      selected_arch = *march_cpu_opt;
      selected_ext = *march_ext_opt;
    }
  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      selected_fpu = *legacy_fpu;
    }
  else if (mfpu_opt)
    selected_fpu = *mfpu_opt;
  else
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	selected_fpu = *mcpu_fpu_opt;
      else if (march_fpu_opt)
	selected_fpu = *march_fpu_opt;
#else
      selected_fpu = fpu_default;
#endif
    }

  /* Still no FPU: fall back to a default, or to FPA when no CPU was
     selected at all.  */
  if (ARM_FEATURE_ZERO (selected_fpu))
    {
      if (!no_cpu_selected ())
	selected_fpu = fpu_default;
      else
	selected_fpu = fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (ARM_FEATURE_ZERO (selected_arch))
    {
      selected_arch = cpu_default;
      selected_cpu = selected_arch;
    }
  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#else
  /* Autodection of feature mode: allow all features in cpu_variant but leave
     selected_cpu unset.  It will be set in aeabi_set_public_attributes ()
     after all instruction have been processed and we can decide what CPU
     should be selected.  */
  if (ARM_FEATURE_ZERO (selected_arch))
    ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
  else
    ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#endif

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.  */
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.  */
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Pick the most specific machine that
     the selected feature set supports.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
27178
27179 /* Command line processing. */
27180
27181 /* md_parse_option
27182 Invocation line includes a switch not recognized by the base assembler.
27183 See if it's a processor-specific option.
27184
27185 This routine is somewhat complicated by the need for backwards
27186 compatibility (since older releases of gcc can't be changed).
27187 The new options try to make the interface as compatible as
27188 possible with GCC.
27189
27190 New options (supported) are:
27191
27192 -mcpu=<cpu name> Assemble for selected processor
27193 -march=<architecture name> Assemble for selected architecture
27194 -mfpu=<fpu architecture> Assemble for selected FPU.
27195 -EB/-mbig-endian Big-endian
27196 -EL/-mlittle-endian Little-endian
27197 -k Generate PIC code
27198 -mthumb Start in Thumb mode
27199 -mthumb-interwork Code supports ARM/Thumb interworking
27200
27201 -m[no-]warn-deprecated Warn about deprecated features
27202 -m[no-]warn-syms Warn when symbols match instructions
27203
27204 For now we will also provide support for:
27205
27206 -mapcs-32 32-bit Program counter
27207 -mapcs-26 26-bit Program counter
 27208	 -mapcs-float		   Floats passed in FP registers
27209 -mapcs-reentrant Reentrant code
27210 -matpcs
27211 (sometime these will probably be replaced with -mapcs=<list of options>
27212 and -matpcs=<list of options>)
27213
 27214	 The remaining options are only supported for backwards compatibility.
27215 Cpu variants, the arm part is optional:
27216 -m[arm]1 Currently not supported.
27217 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
27218 -m[arm]3 Arm 3 processor
27219 -m[arm]6[xx], Arm 6 processors
27220 -m[arm]7[xx][t][[d]m] Arm 7 processors
27221 -m[arm]8[10] Arm 8 processors
27222 -m[arm]9[20][tdmi] Arm 9 processors
27223 -mstrongarm[110[0]] StrongARM processors
27224 -mxscale XScale processors
27225 -m[arm]v[2345[t[e]]] Arm architectures
27226 -mall All (except the ARM1)
27227 FP variants:
27228 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
27229 -mfpe-old (No float load/store multiples)
27230 -mvfpxd VFP Single precision
27231 -mvfp All VFP
27232 -mno-fpu Disable all floating point instructions
27233
27234 The following CPU names are recognized:
27235 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
27236 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
27237 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
27238 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
27239 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
27240 arm10t arm10e, arm1020t, arm1020e, arm10200e,
27241 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
27242
27243 */
27244
/* Short options recognised by md_parse_option: "-m<...>" (takes an
   argument) and "-k" (generate PIC code).  */
const char * md_shortopts = "m:k";

/* Numeric codes for the long options below.  OPTION_EB / OPTION_EL are
   only defined when the corresponding endianness can be selected for
   this target.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
#define OPTION_FDPIC (OPTION_MD_BASE + 3)
27259
/* Long options handled directly by md_parse_option.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
#ifdef OBJ_ELF
  {"fdpic", no_argument, NULL, OPTION_FDPIC},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
27276
/* Describes a simple on/off command-line option: when OPTION matches,
   *VAR is set to VALUE (see arm_opts[] below).  */
struct arm_option_table
{
  const char * option;		/* Option name to match.  */
  const char * help;		/* Help information.  */
  int * var;			/* Variable to change.  */
  int	value;			/* What to change it to.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
27285
27286 struct arm_option_table arm_opts[] =
27287 {
27288 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
27289 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
27290 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
27291 &support_interwork, 1, NULL},
27292 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
27293 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
27294 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
27295 1, NULL},
27296 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
27297 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
27298 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
27299 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
27300 NULL},
27301
27302 /* These are recognized by the assembler, but have no affect on code. */
27303 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
27304 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
27305
27306 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
27307 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
27308 &warn_on_deprecated, 0, NULL},
27309 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
27310 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
27311 {NULL, NULL, NULL, 0, NULL}
27312 };
27313
/* Describes one deprecated legacy option that maps to a feature set:
   when OPTION is seen, *VAR is pointed at VALUE and DEPRECATED (if
   non-null) is printed suggesting the modern replacement.  Used for
   the arm_legacy_opts table below.  */
struct arm_legacy_option_table
{
  const char *		    option;	/* Option name to match.  */
  const arm_feature_set	** var;		/* Variable to change.	*/
  const arm_feature_set	    value;	/* What to change it to.  */
  const char *		    deprecated;	/* If non-null, print this message.  */
};
27321
/* Deprecated single-letter/old-style CPU, architecture and FPU options.
   Each entry maps a legacy spelling to a feature set and carries a
   message naming the modern -mcpu=/-march=/-mfpu= replacement.  New
   processors must go in the arm_cpus table instead — this list is
   frozen and intended to eventually disappear.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.	*/
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
27434
/* Describes one -mcpu= value: its spelling, precomputed name length,
   the architecture feature set it implies, any additional
   CPU-specific extension features, the FPU assumed when no -mfpu= is
   given, and the canonical CPU name used in object attributes.  Used
   for the arm_cpus table below.  */
struct arm_cpu_option_table
{
  const char *		  name;
  size_t		  name_len;
  const arm_feature_set	  value;
  const arm_feature_set	  ext;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	  default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *		  canonical_name;
};
27448
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* Build one arm_cpus entry: N is the -mcpu= spelling, CN the canonical
   name (NULL means "uppercase N"), V the base architecture feature set,
   E the CPU-specific extension features, DF the default FPU.  The name
   length is precomputed at compile time.  */
#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }

static const struct arm_cpu_option_table arm_cpus[] =
{
  /* Pre-v5 cores: no VFP existed, so the default FPU is the old FPA.  */
  ARM_CPU_OPT ("all",		  NULL,		       ARM_ANY,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm1",		  NULL,		       ARM_ARCH_V1,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm2",		  NULL,		       ARM_ARCH_V2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm250",	  NULL,		       ARM_ARCH_V2S,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm3",		  NULL,		       ARM_ARCH_V2S,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm6",		  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm60",		  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm600",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm610",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm620",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7",		  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7m",		  NULL,		       ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7d",		  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dm",	  NULL,		       ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7di",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dmi",	  NULL,		       ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm70",		  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700i",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710t",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720t",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm740t",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710c",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7100",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500fe",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7t",		  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi-s",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm8",		  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm810",	  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm",	  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1",	  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm110",	  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1100",	  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1110",	  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9",		  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920",	  "ARM920T",	       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920t",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm922t",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm940t",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9tdmi",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa526",		  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa626",		  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),

  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0",	  NULL,		       ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm9e",		  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej",	  "ARM926EJ-S",	       ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ejs",	  "ARM926EJ-S",	       ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej-s",	  NULL,		       ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-r0",	  NULL,		       ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e",	  "ARM946E-S",	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-s",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-r0",	  NULL,		       ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e",	  "ARM966E-S",	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-s",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm968e-s",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm10t",	  NULL,		       ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10tdmi",	  NULL,		       ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10e",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020",	  "ARM1020E",	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020t",	  NULL,		       ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm1020e",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1022e",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ejs",	  "ARM1026EJ-S",       ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ej-s",	  NULL,		       ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa606te",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa616te",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa626te",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fmp626",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa726te",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136js",	  "ARM1136J-S",	       ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1136j-s",	  NULL,		       ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1136jfs",	  "ARM1136JF-S",       ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136jf-s",	  NULL,		       ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcore",	  "MPCore",	       ARM_ARCH_V6K,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcorenovfp",	  "MPCore",	       ARM_ARCH_V6K,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1156t2-s",	  NULL,		       ARM_ARCH_V6T2,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1156t2f-s",	  NULL,		       ARM_ARCH_V6T2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1176jz-s",	  NULL,		       ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1176jzf-s",	  NULL,		       ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  /* Cortex-A/R/M cores: the third field carries CPU-specific extension
     bits (security, multiprocessing, CRC, FP16, ...) on top of the base
     architecture.  */
  ARM_CPU_OPT ("cortex-a5",	  "Cortex-A5",	       ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-a7",	  "Cortex-A7",	       ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a8",	  "Cortex-A8",	       ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a9",	  "Cortex-A9",	       ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a12",	  "Cortex-A12",	       ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a15",	  "Cortex-A15",	       ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a17",	  "Cortex-A17",	       ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a32",	  "Cortex-A32",	       ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a35",	  "Cortex-A35",	       ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a53",	  "Cortex-A53",	       ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a55",	  "Cortex-A55",	       ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a57",	  "Cortex-A57",	       ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a72",	  "Cortex-A72",	       ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a73",	  "Cortex-A73",	       ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a75",	  "Cortex-A75",	       ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a76",	  "Cortex-A76",	       ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("ares",		  "Ares",	       ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-r4",	  "Cortex-R4",	       ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r4f",	  "Cortex-R4F",	       ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r5",	  "Cortex-R5",	       ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r7",	  "Cortex-R7",	       ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r8",	  "Cortex-R8",	       ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r52",	  "Cortex-R52",	       ARM_ARCH_V8R,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-m33",	  "Cortex-M33",	       ARM_ARCH_V8M_MAIN,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m23",	  "Cortex-M23",	       ARM_ARCH_V8M_BASE,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m7",	  "Cortex-M7",	       ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m4",	  "Cortex-M4",	       ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m3",	  "Cortex-M3",	       ARM_ARCH_V7M,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m1",	  "Cortex-M1",	       ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0",	  "Cortex-M0",	       ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0plus",	  "Cortex-M0+",	       ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("exynos-m1",	  "Samsung Exynos M1", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("neoverse-n1",	  "Neoverse N1",       ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale",	  NULL,		       ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt",	  NULL,		       ARM_ARCH_IWMMXT,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("iwmmxt2",	  NULL,		       ARM_ARCH_IWMMXT2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("i80200",	  NULL,		       ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* Maverick.	*/
  ARM_CPU_OPT ("ep9312",	  "ARM920T",
	       ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
	       ARM_ARCH_NONE, FPU_ARCH_MAVERICK),

  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4",	  NULL,		       ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("marvell-whitney", NULL,		       ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_NEON_VFP_V4),

  /* APM X-Gene family.	 */
  ARM_CPU_OPT ("xgene1",	  "APM X-Gene 1",      ARM_ARCH_V8A,
	       ARM_ARCH_NONE,
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("xgene2",	  "APM X-Gene 2",      ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),

  /* Sentinel: terminates the lookup.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
27843
/* Describes one +ext/+noext architecture extension: MERGE holds the
   features enabled by "+NAME" and CLEAR the features disabled by
   "+noNAME".  NAME_LEN is the precomputed length of NAME.  */
struct arm_ext_table
{
  const char *		  name;
  size_t		  name_len;
  const arm_feature_set	  merge;
  const arm_feature_set	  clear;
};
27851
/* Describes one -march= value: its spelling, precomputed length, the
   feature set it enables, the default FPU, and (optionally) the table
   of +ext extensions this architecture accepts (NULL if none).  */
struct arm_arch_option_table
{
  const char *			 name;
  size_t			 name_len;
  const arm_feature_set		 value;
  const arm_feature_set		 default_fpu;
  const struct arm_ext_table *	 ext_table;
};
27860
/* Used to add support for +E and +noE extension.  */
#define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
/* Used to add support for a +E extension.  */
#define ARM_ADD(E, M) { E, sizeof(E) - 1, M, ARM_ARCH_NONE }
/* Used to add support for a +noE extension.  */
#define ARM_REMOVE(E, C) { E, sizeof(E) -1, ARM_ARCH_NONE, C }

/* Feature set naming every FP/SIMD coprocessor bit (except the
   pure-endianness flag) plus the FP16 core instruction bits; used as
   the "clear" set so +nofp removes all floating-point support.  */
#define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
			    ~0 & ~FPU_ENDIAN_PURE)
27870
/* Extensions accepted by -march=armv5te (and the other v5/v6 arches
   that reuse this table): +fp/+nofp only.  */
static const struct arm_ext_table armv5te_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V2, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27876
/* Extensions accepted by -march=armv7: +fp/+nofp only.  */
static const struct arm_ext_table armv7_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27882
/* Extensions accepted by -march=armv7ve.  Note the +fp default here is
   VFPv4, one revision newer than the armv7-a table below.  */
static const struct arm_ext_table armv7ve_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16), /* Alias for +fp.	*/
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.	 */
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27905
/* Extensions accepted by -march=armv7-a, including the architectural
   +mp (multiprocessing) and +sec (security) core extensions.  */
static const struct arm_ext_table armv7a_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.	*/
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.	 */
  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),

  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP)),
  ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27930
/* Extensions accepted by -march=armv7-r, including single-precision-only
   FP (+fp.sp) and the hardware-divide extension (+idiv).  */
static const struct arm_ext_table armv7r_ext_table[] =
{
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD),
  ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD), /* Alias for +fp.sp.	*/
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.	*/
  ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27943
/* Extensions accepted by -march=armv7e-m: FPv4/FPv5 single precision
   and FPv5 double precision.  */
static const struct arm_ext_table armv7em_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16, ALL_FP),
  /* Alias for +fp, used to be known as fpv4-sp-d16.  */
  ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16),
  ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27954
/* Extensions accepted by -march=armv8-a.  */
static const struct arm_ext_table armv8a_ext_table[] =
{
  ARM_ADD ("crc", ARCH_CRC_ARMV8),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.	 */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27969
27970
/* Extensions accepted by -march=armv8.1-a (SIMD here is the v8.1
   variant, which includes the RDMA instructions).  */
static const struct arm_ext_table armv81a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.	 */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27984
/* Extensions accepted by -march=armv8.2-a, adding the optional FP16,
   FP16 multiply (fp16fml) and dot-product extensions.	 */
static const struct arm_ext_table armv82a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16),
  ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.	 */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
28001
/* Extensions accepted by -march=armv8.4-a; dot-product and fp16fml are
   mandatory at this level, so they are folded into +simd and +fp16.  */
static const struct arm_ext_table armv84a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.	 */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
28016
/* Extensions accepted by -march=armv8.5-a.  No +sb/+predres entries:
   those features are mandatory from v8.5 onwards.  */
static const struct arm_ext_table armv85a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.	 */
  ARM_REMOVE ("fp", ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
28029
/* Extensions accepted by -march=armv8-m.main: the DSP instructions,
   single-precision FP, and double-precision FP (+fp.dp).  */
static const struct arm_ext_table armv8m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
		  ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
  ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16, ALL_FP),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
28038
/* Extensions accepted by -march=armv8.1-m.main.  +fp here also brings
   in the FP16 instructions; +mve is the integer M-profile Vector
   Extension, while +mve.fp additionally enables the MVE floating-point
   instructions together with scalar FP16.  Note +nomve clears both the
   integer and FP MVE bits.  */
static const struct arm_ext_table armv8_1m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
		  ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
  ARM_EXT ("fp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA),
	   ALL_FP),
  ARM_ADD ("fp.dp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  ARM_EXT ("mve", ARM_FEATURE_COPROC (FPU_MVE),
	   ARM_FEATURE_COPROC (FPU_MVE | FPU_MVE_FP)),
  ARM_ADD ("mve.fp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_MVE | FPU_MVE_FP | FPU_VFP_V5_SP_D16 |
			FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
28058
/* Extensions accepted by -march=armv8-r.  Unlike Armv8-A this profile
   allows a single-precision-only FP implementation (+fp.sp).  */
static const struct arm_ext_table armv8r_ext_table[] =
{
  ARM_ADD ("crc", ARCH_CRC_ARMV8),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
28069
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
/* Build one arm_archs entry without an extension table...  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
/* ...and with one: "ext" names an <ext>_ext_table array above.	 */
#define ARM_ARCH_OPT2(N, V, DF, ext) \
  { N, sizeof (N) - 1, V, DF, ext##_ext_table }
28076 static const struct arm_arch_option_table arm_archs[] =
28077 {
28078 ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
28079 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
28080 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
28081 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
28082 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
28083 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
28084 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
28085 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
28086 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
28087 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
28088 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
28089 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
28090 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
28091 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
28092 ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP, armv5te),
28093 ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP, armv5te),
28094 ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP, armv5te),
28095 ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
28096 ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
28097 ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP, armv5te),
28098 ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP, armv5te),
28099 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
28100 kept to preserve existing behaviour. */
28101 ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
28102 ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
28103 ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP, armv5te),
28104 ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP, armv5te),
28105 ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP, armv5te),
28106 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
28107 kept to preserve existing behaviour. */
28108 ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
28109 ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
28110 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
28111 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
28112 ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP, armv7),
28113 /* The official spelling of the ARMv7 profile variants is the dashed form.
28114 Accept the non-dashed form for compatibility with old toolchains. */
28115 ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
28116 ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP, armv7ve),
28117 ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
28118 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
28119 ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
28120 ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
28121 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
28122 ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP, armv7em),
28123 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
28124 ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP,
28125 armv8m_main),
28126 ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN, FPU_ARCH_VFP,
28127 armv8_1m_main),
28128 ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP, armv8a),
28129 ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP, armv81a),
28130 ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP, armv82a),
28131 ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A, FPU_ARCH_VFP, armv82a),
28132 ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R, FPU_ARCH_VFP, armv8r),
28133 ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A, FPU_ARCH_VFP, armv84a),
28134 ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A, FPU_ARCH_VFP, armv85a),
28135 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
28136 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
28137 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2, FPU_ARCH_VFP),
28138 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
28139 };
28140 #undef ARM_ARCH_OPT
28141
/* ISA extensions in the co-processor and main instruction set space.  */

/* One entry of the legacy, architecture-independent extension table:
   the extension name, the feature bits merged by "+NAME" and cleared by
   "+noNAME", and the architectures for which the extension is valid.  */
struct arm_option_extension_value_table
{
  const char *	  name;		/* Extension name, no "+" prefix.  */
  size_t	  name_len;	/* Length of NAME, for prefix matching.  */
  const arm_feature_set merge_value;	/* Bits added by "+NAME".  */
  const arm_feature_set clear_value;	/* Bits removed by "+noNAME".  */
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set allowed_archs[2];
};
28155
/* The following table must be in alphabetical order with a NULL last entry.
   The order matters: arm_parse_extension scans it linearly and requires
   extensions to be given alphabetically, additions before removals.  */

#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }

/* DEPRECATED: Refrain from using this table to add any new extensions, instead
   use the context sensitive approach using arm_ext_table's.  */
static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc",	 ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
			  ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
			  ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("dsp",	ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
  ARM_EXT_OPT ("fp",     FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp16",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("fp16fml",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						  | ARM_EXT2_FP16_FML),
			   ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						  | ARM_EXT2_FP16_FML),
			   ARM_ARCH_V8_2A),
  ARM_EXT_OPT2 ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
     Thumb divide instruction.  Due to this having the same name as the
     previous entry, this will be ignored when doing command-line parsing and
     only considered by build attribute selection code.  */
  ARM_EXT_OPT ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
			ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
  ARM_EXT_OPT2 ("mp",	ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("os",	ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
			ARM_ARCH_V8A),
  ARM_EXT_OPT ("ras",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("rdma",  FPU_ARCH_NEON_VFP_ARMV8_1,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("sb",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
			ARM_ARCH_V8A),
  ARM_EXT_OPT2 ("sec",	ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("simd",  FPU_ARCH_NEON_VFP_ARMV8,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("virt",	ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
				     | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
};
#undef ARM_EXT_OPT
28241
/* ISA floating-point and Advanced SIMD extensions.  */

/* Maps an -mfpu= name onto its feature set.  */
struct arm_option_fpu_value_table
{
  const char *		  name;		/* FPU name as given on the command line.  */
  const arm_feature_set	  value;	/* Feature bits it enables.  */
};
28248
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Matched exactly (streq) by arm_parse_fpu.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3",	FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}
};
28299
/* Generic name -> integer-value mapping used for the -mfloat-abi= and
   -meabi= option tables below.  */
struct arm_option_value_table
{
  const char *name;	/* Option value name as given on the command line.  */
  long value;		/* Corresponding numeric value.  */
};
28305
/* Recognized -mfloat-abi= values.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
28313
#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif
28324
/* Describes a long option ("-mcpu=", "-mccs", ...) whose argument is decoded
   by a dedicated sub-option parser function.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.  */
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
28332
/* Parse a "+ext1+ext2..." extension string STR against base feature set
   *OPT_SET, updating *EXT_SET with the bits each extension adds or (for
   "+noext") removes.  EXT_TABLE, when non-NULL, is the architecture's
   context-sensitive extension table, consulted before the legacy global
   ARM_EXTENSIONS table.  Returns TRUE on success, FALSE (after issuing a
   diagnostic via as_bad) on any parse error.  */
static bfd_boolean
arm_parse_extension (const char *str, const arm_feature_set *opt_set,
		     arm_feature_set *ext_set,
		     const struct arm_ext_table *ext_table)
{
  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  const arm_feature_set arm_any = ARM_ANY;
  int adding_value = -1;

  while (str != NULL && *str != 0)
    {
      const char *ext;
      size_t len;

      /* Every extension must be introduced by '+'.  */
      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      /* LEN is the length of the current extension name.  */
      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      /* A "no" prefix switches to removal mode and is stripped before
	 table lookup.  */
      if (len >= 2 && strncmp (str, "no", 2) == 0)
	{
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return FALSE;
	    }
	}

      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* First try the architecture-specific table, if one was supplied.  */
      if (ext_table != NULL)
	{
	  const struct arm_ext_table * ext_opt = ext_table;
	  bfd_boolean found = FALSE;
	  for (; ext_opt->name != NULL; ext_opt++)
	    if (ext_opt->name_len == len
		&& strncmp (ext_opt->name, str, len) == 0)
	      {
		if (adding_value)
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->merge))
		      /* TODO: Option not supported.  When we remove the
			 legacy table this case should error out.  */
		      continue;

		    ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, ext_opt->merge);
		  }
		else
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->clear))
		      /* TODO: Option not supported.  When we remove the
			 legacy table this case should error out.  */
		      continue;
		    ARM_CLEAR_FEATURE (*ext_set, *ext_set, ext_opt->clear);
		  }
		found = TRUE;
		break;
	      }
	  if (found)
	    {
	      str = ext;
	      continue;
	    }
	}

      /* Scan over the options table trying to find an exact match.  */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    int i, nb_allowed_archs =
	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
	    /* Check we can apply the extension to this architecture.  */
	    for (i = 0; i < nb_allowed_archs; i++)
	      {
		/* Empty entry.  */
		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
		  continue;
		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
		  break;
	      }
	    if (i == nb_allowed_archs)
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return FALSE;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);

	    /* Allowing Thumb division instructions for ARMv7 in autodetection
	       rely on this break so that duplicate extensions (extensions
	       with the same name as a previous extension in the list) are not
	       considered for command-line parsing.  */
	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return FALSE;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      str = ext;
    };

  return TRUE;
}
28500
28501 static bfd_boolean
28502 arm_parse_cpu (const char *str)
28503 {
28504 const struct arm_cpu_option_table *opt;
28505 const char *ext = strchr (str, '+');
28506 size_t len;
28507
28508 if (ext != NULL)
28509 len = ext - str;
28510 else
28511 len = strlen (str);
28512
28513 if (len == 0)
28514 {
28515 as_bad (_("missing cpu name `%s'"), str);
28516 return FALSE;
28517 }
28518
28519 for (opt = arm_cpus; opt->name != NULL; opt++)
28520 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
28521 {
28522 mcpu_cpu_opt = &opt->value;
28523 if (mcpu_ext_opt == NULL)
28524 mcpu_ext_opt = XNEW (arm_feature_set);
28525 *mcpu_ext_opt = opt->ext;
28526 mcpu_fpu_opt = &opt->default_fpu;
28527 if (opt->canonical_name)
28528 {
28529 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
28530 strcpy (selected_cpu_name, opt->canonical_name);
28531 }
28532 else
28533 {
28534 size_t i;
28535
28536 if (len >= sizeof selected_cpu_name)
28537 len = (sizeof selected_cpu_name) - 1;
28538
28539 for (i = 0; i < len; i++)
28540 selected_cpu_name[i] = TOUPPER (opt->name[i]);
28541 selected_cpu_name[i] = 0;
28542 }
28543
28544 if (ext != NULL)
28545 return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt, NULL);
28546
28547 return TRUE;
28548 }
28549
28550 as_bad (_("unknown cpu `%s'"), str);
28551 return FALSE;
28552 }
28553
28554 static bfd_boolean
28555 arm_parse_arch (const char *str)
28556 {
28557 const struct arm_arch_option_table *opt;
28558 const char *ext = strchr (str, '+');
28559 size_t len;
28560
28561 if (ext != NULL)
28562 len = ext - str;
28563 else
28564 len = strlen (str);
28565
28566 if (len == 0)
28567 {
28568 as_bad (_("missing architecture name `%s'"), str);
28569 return FALSE;
28570 }
28571
28572 for (opt = arm_archs; opt->name != NULL; opt++)
28573 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
28574 {
28575 march_cpu_opt = &opt->value;
28576 if (march_ext_opt == NULL)
28577 march_ext_opt = XNEW (arm_feature_set);
28578 *march_ext_opt = arm_arch_none;
28579 march_fpu_opt = &opt->default_fpu;
28580 strcpy (selected_cpu_name, opt->name);
28581
28582 if (ext != NULL)
28583 return arm_parse_extension (ext, march_cpu_opt, march_ext_opt,
28584 opt->ext_table);
28585
28586 return TRUE;
28587 }
28588
28589 as_bad (_("unknown architecture `%s'\n"), str);
28590 return FALSE;
28591 }
28592
28593 static bfd_boolean
28594 arm_parse_fpu (const char * str)
28595 {
28596 const struct arm_option_fpu_value_table * opt;
28597
28598 for (opt = arm_fpus; opt->name != NULL; opt++)
28599 if (streq (opt->name, str))
28600 {
28601 mfpu_opt = &opt->value;
28602 return TRUE;
28603 }
28604
28605 as_bad (_("unknown floating point format `%s'\n"), str);
28606 return FALSE;
28607 }
28608
28609 static bfd_boolean
28610 arm_parse_float_abi (const char * str)
28611 {
28612 const struct arm_option_value_table * opt;
28613
28614 for (opt = arm_float_abis; opt->name != NULL; opt++)
28615 if (streq (opt->name, str))
28616 {
28617 mfloat_abi_opt = opt->value;
28618 return TRUE;
28619 }
28620
28621 as_bad (_("unknown floating point abi `%s'\n"), str);
28622 return FALSE;
28623 }
28624
#ifdef OBJ_ELF
/* Parse the argument of -meabi=.  STR must exactly match one of the names
   in ARM_EABIS.  Returns TRUE and sets MEABI_FLAGS on success, FALSE
   otherwise.  */
static bfd_boolean
arm_parse_eabi (const char * str)
{
  const struct arm_option_value_table *eabi;

  for (eabi = arm_eabis; eabi->name != NULL; eabi++)
    {
      if (!streq (eabi->name, str))
	continue;
      meabi_flags = eabi->value;
      return TRUE;
    }

  as_bad (_("unknown EABI `%s'\n"), str);
  return FALSE;
}
#endif
28641
28642 static bfd_boolean
28643 arm_parse_it_mode (const char * str)
28644 {
28645 bfd_boolean ret = TRUE;
28646
28647 if (streq ("arm", str))
28648 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
28649 else if (streq ("thumb", str))
28650 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
28651 else if (streq ("always", str))
28652 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
28653 else if (streq ("never", str))
28654 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
28655 else
28656 {
28657 as_bad (_("unknown implicit IT mode `%s', should be "\
28658 "arm, thumb, always, or never."), str);
28659 ret = FALSE;
28660 }
28661
28662 return ret;
28663 }
28664
28665 static bfd_boolean
28666 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
28667 {
28668 codecomposer_syntax = TRUE;
28669 arm_comment_chars[0] = ';';
28670 arm_line_separator_chars[0] = 0;
28671 return TRUE;
28672 }
28673
/* Long options accepted by md_parse_option; each argument is handed to the
   listed sub-option parser.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}
};
28694
/* gas hook: handle command-line option C with argument ARG (may be NULL).
   Returns 1 if the option was recognized and consumed, 0 otherwise.  */
int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

#ifdef OBJ_ELF
    case OPTION_FDPIC:
      arm_fdpic = TRUE;
      break;
#endif /* OBJ_ELF */

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* Simple flag options that store a fixed value.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options, which store a pointer to their value.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      /* Long options whose argument is decoded by a parser function.  */
      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
28791
/* gas hook: print the ARM-specific option summary to FP for --help.  */
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  /* Simple flag options.  */
  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  /* Long options with sub-option parsers.  */
  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));

#ifdef OBJ_ELF
  fprintf (fp, _("\
  --fdpic                 generate an FDPIC object file\n"));
#endif /* OBJ_ELF */
}
28826
28827 #ifdef OBJ_ELF
28828
/* Pairs an EABI Tag_CPU_arch value with the architecture feature set it
   corresponds to; used to build the CPU_ARCH_VER table below.  */
typedef struct
{
  int val;		/* Tag_CPU_arch build attribute value.  */
  arm_feature_set flags;	/* Architecture feature set.  */
} cpu_arch_ver_table;
28834
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   chronologically for architectures, with an exception for ARMv6-M and
   ARMv6S-M due to legacy reasons.  No new architecture should have a
   special case.  This allows for build attribute selection results to be
   stable when new architectures are added.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V1},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2S},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3M},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4xM},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4TxM},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4T},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5xM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5TxM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5T},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TExP},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TE},
    {TAG_CPU_ARCH_V5TEJ,      ARM_ARCH_V5TEJ},
    {TAG_CPU_ARCH_V6,	      ARM_ARCH_V6},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6Z},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6KZ},
    {TAG_CPU_ARCH_V6K,	      ARM_ARCH_V6K},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6T2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6ZT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KZT2},

    /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
       always selected build attributes to match those of ARMv6-M
       (resp. ARMv6S-M).  However, due to these architectures being a strict
       subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
       would be selected when fully respecting chronology of architectures.
       It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
       move them before ARMv7 architectures.  */
    {TAG_CPU_ARCH_V6_M,	      ARM_ARCH_V6M},
    {TAG_CPU_ARCH_V6S_M,      ARM_ARCH_V6SM},

    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7A},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7R},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7M},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7VE},
    {TAG_CPU_ARCH_V7E_M,      ARM_ARCH_V7EM},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_1A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_2A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_3A},
    {TAG_CPU_ARCH_V8M_BASE,   ARM_ARCH_V8M_BASE},
    {TAG_CPU_ARCH_V8M_MAIN,   ARM_ARCH_V8M_MAIN},
    {TAG_CPU_ARCH_V8R,	      ARM_ARCH_V8R},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_4A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_5A},
    {TAG_CPU_ARCH_V8_1M_MAIN, ARM_ARCH_V8_1M_MAIN},
    {-1,		      ARM_ARCH_NONE}
};
28895
28896 /* Set an attribute if it has not already been set by the user. */
28897
28898 static void
28899 aeabi_set_attribute_int (int tag, int value)
28900 {
28901 if (tag < 1
28902 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
28903 || !attributes_set_explicitly[tag])
28904 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
28905 }
28906
28907 static void
28908 aeabi_set_attribute_string (int tag, const char *value)
28909 {
28910 if (tag < 1
28911 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
28912 || !attributes_set_explicitly[tag])
28913 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
28914 }
28915
/* Return whether features in the *NEEDED feature set are available via
   extensions for the architecture whose feature set is *ARCH_FSET.  */

static bfd_boolean
have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
			    const arm_feature_set *needed)
{
  int i, nb_allowed_archs;
  arm_feature_set ext_fset;
  const struct arm_option_extension_value_table *opt;

  /* Accumulate in EXT_FSET every feature obtainable through an extension
     that is valid for *ARCH_FSET.  */
  ext_fset = arm_arch_none;
  for (opt = arm_extensions; opt->name != NULL; opt++)
    {
      /* Extension does not provide any feature we need.  */
      if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
	continue;

      nb_allowed_archs =
	sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
      for (i = 0; i < nb_allowed_archs; i++)
	{
	  /* Empty entry.  */
	  if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
	    break;

	  /* Extension is available, add it.  */
	  if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
	    ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
	}
    }

  /* Can we enable all features in *needed?  */
  return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
}
28951
/* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
   a given architecture feature set *ARCH_EXT_FSET including extension feature
   set *EXT_FSET.  Selection logic used depend on EXACT_MATCH:
   - if true, check for an exact match of the architecture modulo extensions;
   - otherwise, select build attribute value of the first superset
     architecture released so that results remains stable when new
     architectures are added.
   For -march/-mcpu=all the build attribute value of the most featureful
   architecture is returned.  Tag_CPU_arch_profile result is returned in
   PROFILE.  Returns -1 if no suitable architecture was found.  */

static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
			      const arm_feature_set *ext_fset,
			      char *profile, int exact_match)
{
  arm_feature_set arch_fset;
  const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;

  /* Select most featureful architecture with all its extensions if building
     for -march=all as the feature sets used to set build attributes.  */
  if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
    {
      /* Force revisiting of decision for each new architecture.  */
      gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8_1M_MAIN);
      *profile = 'A';
      return TAG_CPU_ARCH_V8;
    }

  /* ARCH_FSET is the user-selected architecture without its extensions.  */
  ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);

  for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
    {
      arm_feature_set known_arch_fset;

      ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
      if (exact_match)
	{
	  /* Base architecture match user-specified architecture and
	     extensions, eg. ARMv6S-M matching -march=armv6-m+os.  */
	  if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
	    {
	      p_ver_ret = p_ver;
	      goto found;
	    }
	  /* Base architecture match user-specified architecture only
	     (eg. ARMv6-M in the same case as above).  Record it in case we
	     find a match with above condition.  */
	  else if (p_ver_ret == NULL
		   && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
	    p_ver_ret = p_ver;
	}
      else
	{

	  /* Architecture has all features wanted.  */
	  if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
	    {
	      arm_feature_set added_fset;

	      /* Compute features added by this architecture over the one
		 recorded in p_ver_ret.  */
	      if (p_ver_ret != NULL)
		ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
				   p_ver_ret->flags);
	      /* First architecture that match incl. with extensions, or the
		 only difference in features over the recorded match is
		 features that were optional and are now mandatory.  */
	      if (p_ver_ret == NULL
		  || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
		{
		  p_ver_ret = p_ver;
		  goto found;
		}
	    }
	  else if (p_ver_ret == NULL)
	    {
	      arm_feature_set needed_ext_fset;

	      ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);

	      /* Architecture has all features needed when using some
		 extensions.  Record it and continue searching in case there
		 exist an architecture providing all needed features without
		 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
		 OS extension).  */
	      if (have_ext_for_needed_feat_p (&known_arch_fset,
					      &needed_ext_fset))
		p_ver_ret = p_ver;
	    }
	}
    }

  if (p_ver_ret == NULL)
    return -1;

found:
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only)))
    *profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r))
    *profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
    *profile = 'M';
  else
    *profile = '\0';
  return p_ver_ret->val;
}
29063
29064 /* Set the public EABI object attributes. */
29065
static void
aeabi_set_public_attributes (void)
{
  char profile = '\0';
  int arch = -1;
  int virt_sec = 0;
  /* Set when the selected FP/Advanced SIMD architecture leaves
     half-precision support optional; gates Tag_VFP_HP_extension below.  */
  int fp16_optional = 0;
  int skip_exact_match = 0;
  arm_feature_set flags, flags_arch, flags_ext;

  /* Autodetection mode, choose the architecture based on the instructions
     actually used.  */
  if (no_cpu_selected ())
    {
      ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);

      if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

      if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

      /* Code run during relaxation relies on selected_cpu being set.  */
      ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
      flags_ext = arm_arch_none;
      ARM_CLEAR_FEATURE (selected_arch, flags_arch, flags_ext);
      selected_ext = flags_ext;
      selected_cpu = flags;
    }
  /* Otherwise, choose the architecture based on the capabilities of the
     requested cpu.  */
  else
    {
      ARM_MERGE_FEATURE_SETS (flags_arch, selected_arch, selected_ext);
      ARM_CLEAR_FEATURE (flags_arch, flags_arch, fpu_any);
      flags_ext = selected_ext;
      flags = selected_cpu;
    }
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_fpu);

  /* Allow the user to override the reported architecture.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch))
    {
      ARM_CLEAR_FEATURE (flags_arch, selected_object_arch, fpu_any);
      flags_ext = arm_arch_none;
    }
  else
    skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);

  /* When this function is run again after relaxation has happened there is no
     way to determine whether an architecture or CPU was specified by the user:
     - selected_cpu is set above for relaxation to work;
     - march_cpu_opt is not set if only -mcpu or .cpu is used;
     - mcpu_cpu_opt is set to arm_arch_any for autodetection.
     Therefore, if not in -march=all case we first try an exact match and fall
     back to autodetection.  */
  if (!skip_exact_match)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
  if (arch == -1)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
  if (arch == -1)
    as_bad (_("no architecture contains all the instructions used\n"));

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* Architecture names are reported upper-cased, without the "armv"
	 prefix.  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  PROFILE was filled in by
     get_aeabi_cpu_arch_from_fset above; '\0' means no profile applies.  */
  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (ARM_CPU_HAS_FEATURE (selected_ext, arm_ext_dsp))
    aeabi_set_attribute_int (Tag_DSP_extension, 1);

  ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || ARM_FEATURE_ZERO (flags_arch))
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || ARM_FEATURE_ZERO (flags_arch))
    {
      int thumb_isa_use;

      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  Branches are ordered from most to least capable FPU so
     the first match picks the highest applicable tag value.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  Only emitted for single-precision-only FPUs.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_MVE_arch: 2 for MVE with FP, 1 for integer-only MVE.  */
  if (ARM_CPU_HAS_FEATURE (flags, mve_fp_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, mve_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 1);

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  Bit 0: security extensions; bit 1:
     virtualization extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
29274
29275 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
29276 finished and free extension feature bits which will not be used anymore. */
29277
29278 void
29279 arm_md_post_relax (void)
29280 {
29281 aeabi_set_public_attributes ();
29282 XDELETE (mcpu_ext_opt);
29283 mcpu_ext_opt = NULL;
29284 XDELETE (march_ext_opt);
29285 march_ext_opt = NULL;
29286 }
29287
29288 /* Add the default contents for the .ARM.attributes section. */
29289
29290 void
29291 arm_md_end (void)
29292 {
29293 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
29294 return;
29295
29296 aeabi_set_public_attributes ();
29297 }
29298 #endif /* OBJ_ELF */
29299
29300 /* Parse a .cpu directive. */
29301
29302 static void
29303 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
29304 {
29305 const struct arm_cpu_option_table *opt;
29306 char *name;
29307 char saved_char;
29308
29309 name = input_line_pointer;
29310 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
29311 input_line_pointer++;
29312 saved_char = *input_line_pointer;
29313 *input_line_pointer = 0;
29314
29315 /* Skip the first "all" entry. */
29316 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
29317 if (streq (opt->name, name))
29318 {
29319 selected_arch = opt->value;
29320 selected_ext = opt->ext;
29321 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
29322 if (opt->canonical_name)
29323 strcpy (selected_cpu_name, opt->canonical_name);
29324 else
29325 {
29326 int i;
29327 for (i = 0; opt->name[i]; i++)
29328 selected_cpu_name[i] = TOUPPER (opt->name[i]);
29329
29330 selected_cpu_name[i] = 0;
29331 }
29332 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
29333
29334 *input_line_pointer = saved_char;
29335 demand_empty_rest_of_line ();
29336 return;
29337 }
29338 as_bad (_("unknown cpu `%s'"), name);
29339 *input_line_pointer = saved_char;
29340 ignore_rest_of_line ();
29341 }
29342
29343 /* Parse a .arch directive. */
29344
29345 static void
29346 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
29347 {
29348 const struct arm_arch_option_table *opt;
29349 char saved_char;
29350 char *name;
29351
29352 name = input_line_pointer;
29353 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
29354 input_line_pointer++;
29355 saved_char = *input_line_pointer;
29356 *input_line_pointer = 0;
29357
29358 /* Skip the first "all" entry. */
29359 for (opt = arm_archs + 1; opt->name != NULL; opt++)
29360 if (streq (opt->name, name))
29361 {
29362 selected_arch = opt->value;
29363 selected_ext = arm_arch_none;
29364 selected_cpu = selected_arch;
29365 strcpy (selected_cpu_name, opt->name);
29366 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
29367 *input_line_pointer = saved_char;
29368 demand_empty_rest_of_line ();
29369 return;
29370 }
29371
29372 as_bad (_("unknown architecture `%s'\n"), name);
29373 *input_line_pointer = saved_char;
29374 ignore_rest_of_line ();
29375 }
29376
29377 /* Parse a .object_arch directive. */
29378
29379 static void
29380 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
29381 {
29382 const struct arm_arch_option_table *opt;
29383 char saved_char;
29384 char *name;
29385
29386 name = input_line_pointer;
29387 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
29388 input_line_pointer++;
29389 saved_char = *input_line_pointer;
29390 *input_line_pointer = 0;
29391
29392 /* Skip the first "all" entry. */
29393 for (opt = arm_archs + 1; opt->name != NULL; opt++)
29394 if (streq (opt->name, name))
29395 {
29396 selected_object_arch = opt->value;
29397 *input_line_pointer = saved_char;
29398 demand_empty_rest_of_line ();
29399 return;
29400 }
29401
29402 as_bad (_("unknown architecture `%s'\n"), name);
29403 *input_line_pointer = saved_char;
29404 ignore_rest_of_line ();
29405 }
29406
29407 /* Parse a .arch_extension directive. */
29408
29409 static void
29410 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
29411 {
29412 const struct arm_option_extension_value_table *opt;
29413 char saved_char;
29414 char *name;
29415 int adding_value = 1;
29416
29417 name = input_line_pointer;
29418 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
29419 input_line_pointer++;
29420 saved_char = *input_line_pointer;
29421 *input_line_pointer = 0;
29422
29423 if (strlen (name) >= 2
29424 && strncmp (name, "no", 2) == 0)
29425 {
29426 adding_value = 0;
29427 name += 2;
29428 }
29429
29430 for (opt = arm_extensions; opt->name != NULL; opt++)
29431 if (streq (opt->name, name))
29432 {
29433 int i, nb_allowed_archs =
29434 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
29435 for (i = 0; i < nb_allowed_archs; i++)
29436 {
29437 /* Empty entry. */
29438 if (ARM_CPU_IS_ANY (opt->allowed_archs[i]))
29439 continue;
29440 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], selected_arch))
29441 break;
29442 }
29443
29444 if (i == nb_allowed_archs)
29445 {
29446 as_bad (_("architectural extension `%s' is not allowed for the "
29447 "current base architecture"), name);
29448 break;
29449 }
29450
29451 if (adding_value)
29452 ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
29453 opt->merge_value);
29454 else
29455 ARM_CLEAR_FEATURE (selected_ext, selected_ext, opt->clear_value);
29456
29457 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
29458 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
29459 *input_line_pointer = saved_char;
29460 demand_empty_rest_of_line ();
29461 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
29462 on this return so that duplicate extensions (extensions with the
29463 same name as a previous extension in the list) are not considered
29464 for command-line parsing. */
29465 return;
29466 }
29467
29468 if (opt->name == NULL)
29469 as_bad (_("unknown architecture extension `%s'\n"), name);
29470
29471 *input_line_pointer = saved_char;
29472 ignore_rest_of_line ();
29473 }
29474
29475 /* Parse a .fpu directive. */
29476
29477 static void
29478 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
29479 {
29480 const struct arm_option_fpu_value_table *opt;
29481 char saved_char;
29482 char *name;
29483
29484 name = input_line_pointer;
29485 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
29486 input_line_pointer++;
29487 saved_char = *input_line_pointer;
29488 *input_line_pointer = 0;
29489
29490 for (opt = arm_fpus; opt->name != NULL; opt++)
29491 if (streq (opt->name, name))
29492 {
29493 selected_fpu = opt->value;
29494 #ifndef CPU_DEFAULT
29495 if (no_cpu_selected ())
29496 ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
29497 else
29498 #endif
29499 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
29500 *input_line_pointer = saved_char;
29501 demand_empty_rest_of_line ();
29502 return;
29503 }
29504
29505 as_bad (_("unknown floating point format `%s'\n"), name);
29506 *input_line_pointer = saved_char;
29507 ignore_rest_of_line ();
29508 }
29509
/* Copy symbol information.  Propagates the ARM-specific per-symbol flags
   (whatever ARM_GET_FLAG exposes; presumably including the Thumb-function
   marking -- the macro is defined elsewhere, confirm before relying on
   specifics) from SRC to DEST.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
29517
29518 #ifdef OBJ_ELF
29519 /* Given a symbolic attribute NAME, return the proper integer value.
29520 Returns -1 if the attribute is not known. */
29521
29522 int
29523 arm_convert_symbolic_attribute (const char *name)
29524 {
29525 static const struct
29526 {
29527 const char * name;
29528 const int tag;
29529 }
29530 attribute_table[] =
29531 {
29532 /* When you modify this table you should
29533 also modify the list in doc/c-arm.texi. */
29534 #define T(tag) {#tag, tag}
29535 T (Tag_CPU_raw_name),
29536 T (Tag_CPU_name),
29537 T (Tag_CPU_arch),
29538 T (Tag_CPU_arch_profile),
29539 T (Tag_ARM_ISA_use),
29540 T (Tag_THUMB_ISA_use),
29541 T (Tag_FP_arch),
29542 T (Tag_VFP_arch),
29543 T (Tag_WMMX_arch),
29544 T (Tag_Advanced_SIMD_arch),
29545 T (Tag_PCS_config),
29546 T (Tag_ABI_PCS_R9_use),
29547 T (Tag_ABI_PCS_RW_data),
29548 T (Tag_ABI_PCS_RO_data),
29549 T (Tag_ABI_PCS_GOT_use),
29550 T (Tag_ABI_PCS_wchar_t),
29551 T (Tag_ABI_FP_rounding),
29552 T (Tag_ABI_FP_denormal),
29553 T (Tag_ABI_FP_exceptions),
29554 T (Tag_ABI_FP_user_exceptions),
29555 T (Tag_ABI_FP_number_model),
29556 T (Tag_ABI_align_needed),
29557 T (Tag_ABI_align8_needed),
29558 T (Tag_ABI_align_preserved),
29559 T (Tag_ABI_align8_preserved),
29560 T (Tag_ABI_enum_size),
29561 T (Tag_ABI_HardFP_use),
29562 T (Tag_ABI_VFP_args),
29563 T (Tag_ABI_WMMX_args),
29564 T (Tag_ABI_optimization_goals),
29565 T (Tag_ABI_FP_optimization_goals),
29566 T (Tag_compatibility),
29567 T (Tag_CPU_unaligned_access),
29568 T (Tag_FP_HP_extension),
29569 T (Tag_VFP_HP_extension),
29570 T (Tag_ABI_FP_16bit_format),
29571 T (Tag_MPextension_use),
29572 T (Tag_DIV_use),
29573 T (Tag_nodefaults),
29574 T (Tag_also_compatible_with),
29575 T (Tag_conformance),
29576 T (Tag_T2EE_use),
29577 T (Tag_Virtualization_use),
29578 T (Tag_DSP_extension),
29579 T (Tag_MVE_arch),
29580 /* We deliberately do not include Tag_MPextension_use_legacy. */
29581 #undef T
29582 };
29583 unsigned int i;
29584
29585 if (name == NULL)
29586 return -1;
29587
29588 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
29589 if (streq (name, attribute_table[i].name))
29590 return attribute_table[i].tag;
29591
29592 return -1;
29593 }
29594
29595 /* Apply sym value for relocations only in the case that they are for
29596 local symbols in the same segment as the fixup and you have the
29597 respective architectural feature for blx and simple switches. */
29598
29599 int
29600 arm_apply_sym_value (struct fix * fixP, segT this_seg)
29601 {
29602 if (fixP->fx_addsy
29603 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
29604 /* PR 17444: If the local symbol is in a different section then a reloc
29605 will always be generated for it, so applying the symbol value now
29606 will result in a double offset being stored in the relocation. */
29607 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
29608 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
29609 {
29610 switch (fixP->fx_r_type)
29611 {
29612 case BFD_RELOC_ARM_PCREL_BLX:
29613 case BFD_RELOC_THUMB_PCREL_BRANCH23:
29614 if (ARM_IS_FUNC (fixP->fx_addsy))
29615 return 1;
29616 break;
29617
29618 case BFD_RELOC_ARM_PCREL_CALL:
29619 case BFD_RELOC_THUMB_PCREL_BLX:
29620 if (THUMB_IS_FUNC (fixP->fx_addsy))
29621 return 1;
29622 break;
29623
29624 default:
29625 break;
29626 }
29627
29628 }
29629 return 0;
29630 }
29631 #endif /* OBJ_ELF */
This page took 0.98023 seconds and 4 git commands to generate.