/* From binutils-gdb, gas/config/tc-arm.c
   (patch series: "[binutils, ARM, 13/16] Add support for CLRM").  */
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2019 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
47 /* This structure holds the unwinding state. */
48
static struct
{
  /* Symbol marking the start of the function being unwound.  */
  symbolS * proc_start;
  /* Symbol for this function's entry in the exception index table.  */
  symbolS * table_entry;
  /* Personality routine symbol, if an explicit one was given;
     otherwise PERSONALITY_INDEX selects a predefined routine.  */
  symbolS * personality_routine;
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* Number of opcode bytes stored in OPCODES, and the allocated size
     of that buffer.  */
  int opcode_count;
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;
77
78 /* Whether --fdpic was given. */
79 static int arm_fdpic;
80
81 #endif /* OBJ_ELF */
82
83 /* Results from operand parsing worker functions. */
84
typedef enum
{
  /* The operand parsed successfully.  */
  PARSE_OPERAND_SUCCESS,
  /* The operand did not parse; the caller may backtrack and try an
     alternative interpretation.  */
  PARSE_OPERAND_FAIL,
  /* The operand did not parse in a way that rules out any alternative,
     so the caller must not backtrack.  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
91
/* Floating-point ABI variants selectable on the command line
   (see mfloat_abi_opt below).  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
98
99 /* Types of processor to assemble for. */
100 #ifndef CPU_DEFAULT
101 /* The code that was here used to select a default CPU depending on compiler
102 pre-defines which were only present when doing native builds, thus
103 changing gas' default behaviour depending upon the build host.
104
105 If you have a target that requires a default CPU option then the you
106 should define CPU_DEFAULT here. */
107 #endif
108
109 #ifndef FPU_DEFAULT
110 # ifdef TE_LINUX
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
113 # ifdef OBJ_ELF
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
115 # else
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
118 # endif
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
121 # else
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
124 # endif
125 #endif /* ifndef FPU_DEFAULT */
126
127 #define streq(a, b) (strcmp (a, b) == 0)
128
129 /* Current set of feature bits available (CPU+FPU). Different from
130 selected_cpu + selected_fpu in case of autodetection since the CPU
131 feature bits are then all set. */
132 static arm_feature_set cpu_variant;
133 /* Feature bits used in each execution state. Used to set build attribute
134 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
135 static arm_feature_set arm_arch_used;
136 static arm_feature_set thumb_arch_used;
137
138 /* Flags stored in private area of BFD structure. */
139 static int uses_apcs_26 = FALSE;
140 static int atpcs = FALSE;
141 static int support_interwork = FALSE;
142 static int uses_apcs_float = FALSE;
143 static int pic_code = FALSE;
144 static int fix_v4bx = FALSE;
145 /* Warn on using deprecated features. */
146 static int warn_on_deprecated = TRUE;
147
148 /* Understand CodeComposer Studio assembly syntax. */
149 bfd_boolean codecomposer_syntax = FALSE;
150
151 /* Variables that we set while parsing command-line options. Once all
152 options have been read we re-process these values to set the real
153 assembly flags. */
154
155 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
156 instead of -mcpu=arm1). */
157 static const arm_feature_set *legacy_cpu = NULL;
158 static const arm_feature_set *legacy_fpu = NULL;
159
160 /* CPU, extension and FPU feature bits selected by -mcpu. */
161 static const arm_feature_set *mcpu_cpu_opt = NULL;
162 static arm_feature_set *mcpu_ext_opt = NULL;
163 static const arm_feature_set *mcpu_fpu_opt = NULL;
164
165 /* CPU, extension and FPU feature bits selected by -march. */
166 static const arm_feature_set *march_cpu_opt = NULL;
167 static arm_feature_set *march_ext_opt = NULL;
168 static const arm_feature_set *march_fpu_opt = NULL;
169
170 /* Feature bits selected by -mfpu. */
171 static const arm_feature_set *mfpu_opt = NULL;
172
173 /* Constants for known architecture features. */
174 static const arm_feature_set fpu_default = FPU_DEFAULT;
175 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
176 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
177 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
178 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
179 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
180 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
181 #ifdef OBJ_ELF
182 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
183 #endif
184 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
185
186 #ifdef CPU_DEFAULT
187 static const arm_feature_set cpu_default = CPU_DEFAULT;
188 #endif
189
190 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
191 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
192 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
193 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
194 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
195 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
196 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
197 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
198 static const arm_feature_set arm_ext_v4t_5 =
199 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
200 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
201 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
202 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
203 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
204 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
205 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
206 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
/* Only for compatibility of hint instructions.  */
208 static const arm_feature_set arm_ext_v6k_v6t2 =
209 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V6T2);
210 static const arm_feature_set arm_ext_v6_notm =
211 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
212 static const arm_feature_set arm_ext_v6_dsp =
213 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
214 static const arm_feature_set arm_ext_barrier =
215 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
216 static const arm_feature_set arm_ext_msr =
217 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
218 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
219 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
220 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
221 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
222 #ifdef OBJ_ELF
223 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
224 #endif
225 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
226 static const arm_feature_set arm_ext_m =
227 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
228 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
229 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
230 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
231 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
232 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
233 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
234 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
235 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
236 static const arm_feature_set arm_ext_v8m_main =
237 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
238 static const arm_feature_set arm_ext_v8_1m_main =
239 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
240 /* Instructions in ARMv8-M only found in M profile architectures. */
241 static const arm_feature_set arm_ext_v8m_m_only =
242 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
243 static const arm_feature_set arm_ext_v6t2_v8m =
244 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
245 /* Instructions shared between ARMv8-A and ARMv8-M. */
246 static const arm_feature_set arm_ext_atomics =
247 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
248 #ifdef OBJ_ELF
249 /* DSP instructions Tag_DSP_extension refers to. */
250 static const arm_feature_set arm_ext_dsp =
251 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
252 #endif
253 static const arm_feature_set arm_ext_ras =
254 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
255 /* FP16 instructions. */
256 static const arm_feature_set arm_ext_fp16 =
257 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
258 static const arm_feature_set arm_ext_fp16_fml =
259 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
260 static const arm_feature_set arm_ext_v8_2 =
261 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
262 static const arm_feature_set arm_ext_v8_3 =
263 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
264 static const arm_feature_set arm_ext_sb =
265 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
266 static const arm_feature_set arm_ext_predres =
267 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);
268
269 static const arm_feature_set arm_arch_any = ARM_ANY;
270 #ifdef OBJ_ELF
271 static const arm_feature_set fpu_any = FPU_ANY;
272 #endif
273 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
274 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
275 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
276
277 static const arm_feature_set arm_cext_iwmmxt2 =
278 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
279 static const arm_feature_set arm_cext_iwmmxt =
280 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
281 static const arm_feature_set arm_cext_xscale =
282 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
283 static const arm_feature_set arm_cext_maverick =
284 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
285 static const arm_feature_set fpu_fpa_ext_v1 =
286 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
287 static const arm_feature_set fpu_fpa_ext_v2 =
288 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
289 static const arm_feature_set fpu_vfp_ext_v1xd =
290 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
291 static const arm_feature_set fpu_vfp_ext_v1 =
292 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
293 static const arm_feature_set fpu_vfp_ext_v2 =
294 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
295 static const arm_feature_set fpu_vfp_ext_v3xd =
296 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
297 static const arm_feature_set fpu_vfp_ext_v3 =
298 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
299 static const arm_feature_set fpu_vfp_ext_d32 =
300 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
301 static const arm_feature_set fpu_neon_ext_v1 =
302 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
303 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
304 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
305 #ifdef OBJ_ELF
306 static const arm_feature_set fpu_vfp_fp16 =
307 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
308 static const arm_feature_set fpu_neon_ext_fma =
309 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
310 #endif
311 static const arm_feature_set fpu_vfp_ext_fma =
312 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
313 static const arm_feature_set fpu_vfp_ext_armv8 =
314 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
315 static const arm_feature_set fpu_vfp_ext_armv8xd =
316 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
317 static const arm_feature_set fpu_neon_ext_armv8 =
318 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
319 static const arm_feature_set fpu_crypto_ext_armv8 =
320 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
321 static const arm_feature_set crc_ext_armv8 =
322 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
323 static const arm_feature_set fpu_neon_ext_v8_1 =
324 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
325 static const arm_feature_set fpu_neon_ext_dotprod =
326 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
327
328 static int mfloat_abi_opt = -1;
329 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
330 directive. */
331 static arm_feature_set selected_arch = ARM_ARCH_NONE;
332 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
333 directive. */
334 static arm_feature_set selected_ext = ARM_ARCH_NONE;
335 /* Feature bits selected by the last -mcpu/-march or by the combination of the
336 last .cpu/.arch directive .arch_extension directives since that
337 directive. */
338 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
339 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
340 static arm_feature_set selected_fpu = FPU_NONE;
341 /* Feature bits selected by the last .object_arch directive. */
342 static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
343 /* Must be long enough to hold any of the names in arm_cpus. */
344 static char selected_cpu_name[20];
345
346 extern FLONUM_TYPE generic_floating_point_number;
347
348 /* Return if no cpu was selected on command-line. */
349 static bfd_boolean
350 no_cpu_selected (void)
351 {
352 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
353 }
354
355 #ifdef OBJ_ELF
356 # ifdef EABI_DEFAULT
357 static int meabi_flags = EABI_DEFAULT;
358 # else
359 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
360 # endif
361
362 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
363
364 bfd_boolean
365 arm_is_eabi (void)
366 {
367 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
368 }
369 #endif
370
371 #ifdef OBJ_ELF
372 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
373 symbolS * GOT_symbol;
374 #endif
375
376 /* 0: assemble for ARM,
377 1: assemble for Thumb,
378 2: assemble for Thumb even though target CPU does not support thumb
379 instructions. */
380 static int thumb_mode = 0;
381 /* A value distinct from the possible values for thumb_mode that we
382 can use to record whether thumb_mode has been copied into the
383 tc_frag_data field of a frag. */
384 #define MODE_RECORDED (1 << 4)
385
386 /* Specifies the intrinsic IT insn behavior mode. */
enum implicit_it_mode
{
  /* Never allow implicit IT handling.  */
  IMPLICIT_IT_MODE_NEVER = 0x00,
  /* Allow implicit IT handling when assembling ARM-state code.  */
  IMPLICIT_IT_MODE_ARM = 0x01,
  /* Allow implicit IT handling when assembling Thumb-state code.  */
  IMPLICIT_IT_MODE_THUMB = 0x02,
  /* Allow implicit IT handling in both states.  */
  IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
};
/* Currently selected implicit-IT mode (a mask of the values above).  */
static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
395
396 /* If unified_syntax is true, we are processing the new unified
397 ARM/Thumb syntax. Important differences from the old ARM mode:
398
399 - Immediate operands do not require a # prefix.
400 - Conditional affixes always appear at the end of the
401 instruction. (For backward compatibility, those instructions
402 that formerly had them in the middle, continue to accept them
403 there.)
404 - The IT instruction may appear, and if it does is validated
405 against subsequent conditional affixes. It does not generate
406 machine code.
407
408 Important differences from the old Thumb mode:
409
410 - Immediate operands do not require a # prefix.
411 - Most of the V6T2 instructions are only available in unified mode.
412 - The .N and .W suffixes are recognized and honored (it is an error
413 if they cannot be honored).
414 - All instructions set the flags if and only if they have an 's' affix.
415 - Conditional affixes may be used. They are validated against
416 preceding IT instructions. Unlike ARM mode, you cannot use a
417 conditional affix except in the scope of an IT instruction. */
418
419 static bfd_boolean unified_syntax = FALSE;
420
421 /* An immediate operand can start with #, and ld*, st*, pld operands
422 can contain [ and ]. We need to tell APP not to elide whitespace
423 before a [, which can appear as the first operand for pld.
424 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
425 const char arm_symbol_chars[] = "#[]{}";
426
/* Classification of one element in a Neon type suffix
   (e.g. the "s32" in "vadd.s32").  */
enum neon_el_type
{
  NT_invtype,
  NT_untyped,
  NT_integer,
  NT_float,
  NT_poly,
  NT_signed,
  NT_unsigned
};

/* A single parsed Neon type element: its classification and size.  */
struct neon_type_el
{
  enum neon_el_type type;
  unsigned size;
};

/* Maximum number of type elements in one Neon type suffix.  */
#define NEON_MAX_TYPE_ELS 4

/* A fully parsed Neon type suffix: ELEMS of the EL entries are valid.  */
struct neon_type
{
  struct neon_type_el el[NEON_MAX_TYPE_ELS];
  unsigned elems;
};
451
/* Classification of an instruction with respect to Thumb IT blocks,
   consumed by the IT-state machine (handle_it_state).  */
enum it_instruction_type
{
  OUTSIDE_IT_INSN,
  INSIDE_IT_INSN,
  INSIDE_IT_LAST_INSN,
  IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
			     if inside, should be the last one.  */
  NEUTRAL_IT_INSN,        /* This could be either inside or outside,
			     i.e. BKPT and NOP.  */
  IT_INSN                 /* The IT insn has been parsed.  */
};
463
464 /* The maximum number of operands we need. */
465 #define ARM_IT_MAX_OPERANDS 6
466 #define ARM_IT_MAX_RELOCS 3
467
/* State of the instruction currently being assembled: filled in by the
   parsing routines and consumed by the encoding routines.  */
struct arm_it
{
  /* Error message to report for this instruction, or NULL.  */
  const char * error;
  /* The (ARM-format) opcode bits assembled so far.  */
  unsigned long instruction;
  /* Size of the instruction in bytes, and the size explicitly
     required by the user, if any (0 if unconstrained).  */
  int size;
  int size_req;
  /* Condition code for the instruction.  */
  int cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int uncond_value;
  /* Neon type suffix parsed from the mnemonic, if any.  */
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  int is_neon;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long relax;
  /* Relocations required for the operands.  */
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS exp;
    int pc_rel;
  } relocs[ARM_IT_MAX_RELOCS];

  /* IT-block classification of this instruction (see enum above).  */
  enum it_instruction_type it_insn_type;

  /* Parsed operands.  */
  struct
  {
    /* Register number, when ISREG is set.  */
    unsigned reg;
    /* Immediate value, or second register when IMMISREG is set.  */
    signed int imm;
    /* Neon type/index attached to this operand, if any.  */
    struct neon_type_el vectype;
    unsigned present : 1; /* Operand present.  */
    unsigned isreg : 1; /* Operand was a register.  */
    unsigned immisreg : 1; /* .imm field is a second register.  */
    unsigned isscalar : 1; /* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1; /* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1; /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad : 1; /* Operand is Neon quad-precision register.  */
    unsigned issingle : 1; /* Operand is VFP single-precision register.  */
    unsigned hasreloc : 1; /* Operand has relocation suffix.  */
    unsigned writeback : 1; /* Operand has trailing ! */
    unsigned preind : 1; /* Preindexed address.  */
    unsigned postind : 1; /* Postindexed address.  */
    unsigned negative : 1; /* Index register was negated.  */
    unsigned shifted : 1; /* Shift applied to operation.  */
    unsigned shift_kind : 3; /* Shift operation (enum shift_kind).  */
  } operands[ARM_IT_MAX_OPERANDS];
};

/* The instruction currently being assembled.  */
static struct arm_it inst;
523
524 #define NUM_FLOAT_VALS 8
525
526 const char * fp_const[] =
527 {
528 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
529 };
530
531 /* Number of littlenums required to hold an extended precision number. */
532 #define MAX_LITTLENUMS 6
533
534 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
535
536 #define FAIL (-1)
537 #define SUCCESS (0)
538
539 #define SUFF_S 1
540 #define SUFF_D 2
541 #define SUFF_E 3
542 #define SUFF_P 4
543
544 #define CP_T_X 0x00008000
545 #define CP_T_Y 0x00400000
546
547 #define CONDS_BIT 0x00100000
548 #define LOAD_BIT 0x00100000
549
550 #define DOUBLE_LOAD_FLAG 0x00000001
551
/* An entry in the condition-code table: mnemonic suffix and its
   encoding value.  */
struct asm_cond
{
  const char * template_name;
  unsigned long value;
};

/* Encoding of the AL ("always") condition.  */
#define COND_ALWAYS 0xE

/* An entry in the PSR-name table: name and the field bits it selects.  */
struct asm_psr
{
  const char * template_name;
  unsigned long field;
};

/* An entry in the barrier-option table: name, its encoding, and the
   architecture feature set that provides it.  */
struct asm_barrier_opt
{
  const char * template_name;
  unsigned long value;
  const arm_feature_set arch;
};
572
573 /* The bit that distinguishes CPSR and SPSR. */
574 #define SPSR_BIT (1 << 22)
575
576 /* The individual PSR flag bits. */
577 #define PSR_c (1 << 16)
578 #define PSR_x (1 << 17)
579 #define PSR_s (1 << 18)
580 #define PSR_f (1 << 19)
581
/* An entry in the relocation-name table: suffix name and the BFD
   relocation it denotes.  */
struct reloc_entry
{
  const char * name;
  bfd_reloc_code_real_type reloc;
};

/* Positions at which a VFP register number can be encoded in an
   instruction: single (S) or double (D) precision, in the d/m/n
   operand slots.  */
enum vfp_reg_pos
{
  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
};

/* Addressing-mode variants of VFP load/store multiple, including the
   "X" (FLDMX/FSTMX) forms.  */
enum vfp_ldstm_type
{
  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
};
598
599 /* Bits for DEFINED field in neon_typed_alias. */
600 #define NTA_HASTYPE 1
601 #define NTA_HASINDEX 2
602
/* Type and/or index information attached to a register alias created
   with .dn or .qn.  DEFINED is a mask of NTA_* bits saying which of
   the other fields are valid.  */
struct neon_typed_alias
{
  unsigned char defined;
  /* Scalar index, valid when NTA_HASINDEX is set in DEFINED.  */
  unsigned char index;
  /* Element type, valid when NTA_HASTYPE is set in DEFINED.  */
  struct neon_type_el eltype;
};
609
610 /* ARM register categories. This includes coprocessor numbers and various
611 architecture extensions' registers. Each entry should have an error message
612 in reg_expected_msgs below. */
enum arm_reg_type
{
  REG_TYPE_RN,		/* Core ARM register.  */
  REG_TYPE_CP,		/* Coprocessor number.  */
  REG_TYPE_CN,		/* Coprocessor register.  */
  REG_TYPE_FN,		/* FPA register.  */
  REG_TYPE_VFS,		/* VFP single-precision register.  */
  REG_TYPE_VFD,		/* VFP/Neon double-precision register.  */
  REG_TYPE_NQ,		/* Neon quad-precision register.  */
  REG_TYPE_VFSD,	/* VFP single or double precision register.  */
  REG_TYPE_NDQ,		/* Neon double or quad precision register.  */
  REG_TYPE_NSD,		/* Neon single or double precision register.  */
  REG_TYPE_NSDQ,	/* VFP single, double or Neon quad precision.  */
  REG_TYPE_VFC,		/* VFP system register.  */
  REG_TYPE_MVF,		/* Maverick MVF register.  */
  REG_TYPE_MVD,		/* Maverick MVD register.  */
  REG_TYPE_MVFX,	/* Maverick MVFX register.  */
  REG_TYPE_MVDX,	/* Maverick MVDX register.  */
  REG_TYPE_MVAX,	/* Maverick MVAX register.  */
  REG_TYPE_DSPSC,	/* Maverick DSPSC register.  */
  REG_TYPE_MMXWR,	/* iWMMXt data register.  */
  REG_TYPE_MMXWC,	/* iWMMXt control register.  */
  REG_TYPE_MMXWCG,	/* iWMMXt scalar register.  */
  REG_TYPE_XSCALE,	/* XScale accumulator register.  */
  REG_TYPE_RNB		/* Core register; deliberately has no diagnostic
			   of its own in reg_expected_msgs.  */
};
639
640 /* Structure for a hash table entry for a register.
641 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
642 information which states whether a vector type or index is specified (for a
643 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
struct reg_entry
{
  /* Register name as written in assembly source.  */
  const char * name;
  /* Register (or coprocessor) number.  */
  unsigned int number;
  /* One of the enum arm_reg_type values.  */
  unsigned char type;
  /* Nonzero for a predefined register, zero for a user-created alias
     (presumed from usage -- aliases can be purged, builtins cannot).  */
  unsigned char builtin;
  /* Extra type/index info for .dn/.qn aliases, otherwise NULL.  */
  struct neon_typed_alias * neon;
};
652
653 /* Diagnostics used when we don't get a register of the expected type. */
/* Diagnostics used when we don't get a register of the expected type.
   Indexed by enum arm_reg_type; REG_TYPE_RNB intentionally has an
   empty message.  */
const char * const reg_expected_msgs[] =
{
  [REG_TYPE_RN]	    = N_("ARM register expected"),
  [REG_TYPE_CP]	    = N_("bad or missing co-processor number"),
  [REG_TYPE_CN]	    = N_("co-processor register expected"),
  [REG_TYPE_FN]	    = N_("FPA register expected"),
  [REG_TYPE_VFS]    = N_("VFP single precision register expected"),
  [REG_TYPE_VFD]    = N_("VFP/Neon double precision register expected"),
  [REG_TYPE_NQ]	    = N_("Neon quad precision register expected"),
  [REG_TYPE_VFSD]   = N_("VFP single or double precision register expected"),
  [REG_TYPE_NDQ]    = N_("Neon double or quad precision register expected"),
  [REG_TYPE_NSD]    = N_("Neon single or double precision register expected"),
  [REG_TYPE_NSDQ]   = N_("VFP single, double or Neon quad precision register"
			 " expected"),
  [REG_TYPE_VFC]    = N_("VFP system register expected"),
  [REG_TYPE_MVF]    = N_("Maverick MVF register expected"),
  [REG_TYPE_MVD]    = N_("Maverick MVD register expected"),
  [REG_TYPE_MVFX]   = N_("Maverick MVFX register expected"),
  [REG_TYPE_MVDX]   = N_("Maverick MVDX register expected"),
  [REG_TYPE_MVAX]   = N_("Maverick MVAX register expected"),
  [REG_TYPE_DSPSC]  = N_("Maverick DSPSC register expected"),
  [REG_TYPE_MMXWR]  = N_("iWMMXt data register expected"),
  [REG_TYPE_MMXWC]  = N_("iWMMXt control register expected"),
  [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
  [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
  [REG_TYPE_RNB]    = N_("")
};
681
682 /* Some well known registers that we refer to directly elsewhere. */
683 #define REG_R12 12
684 #define REG_SP 13
685 #define REG_LR 14
686 #define REG_PC 15
687
688 /* ARM instructions take 4bytes in the object file, Thumb instructions
689 take 2: */
690 #define INSN_SIZE 4
691
/* One entry in the opcode table: a mnemonic and everything needed to
   parse and encode it in both ARM and Thumb states.  */
struct asm_opcode
{
  /* Basic string to match.  */
  const char * template_name;

  /* Parameters to instruction.  */
  unsigned int operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue : 28;

  /* Thumb-format instruction code.  */
  unsigned int tvalue;

  /* Which architecture variant provides this instruction.  */
  const arm_feature_set * avariant;
  const arm_feature_set * tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);
};
719
720 /* Defines for various bits that we will want to toggle. */
721 #define INST_IMMEDIATE 0x02000000
722 #define OFFSET_REG 0x02000000
723 #define HWOFFSET_IMM 0x00400000
724 #define SHIFT_BY_REG 0x00000010
725 #define PRE_INDEX 0x01000000
726 #define INDEX_UP 0x00800000
727 #define WRITE_BACK 0x00200000
728 #define LDM_TYPE_2_OR_3 0x00400000
729 #define CPSI_MMOD 0x00020000
730
731 #define LITERAL_MASK 0xf000f000
732 #define OPCODE_MASK 0xfe1fffff
733 #define V4_STR_BIT 0x00000020
734 #define VLDR_VMOV_SAME 0x0040f000
735
736 #define T2_SUBS_PC_LR 0xf3de8f00
737
738 #define DATA_OP_SHIFT 21
739 #define SBIT_SHIFT 20
740
741 #define T2_OPCODE_MASK 0xfe1fffff
742 #define T2_DATA_OP_SHIFT 21
743 #define T2_SBIT_SHIFT 20
744
745 #define A_COND_MASK 0xf0000000
746 #define A_PUSH_POP_OP_MASK 0x0fff0000
747
/* Opcodes for pushing/popping registers to/from the stack.  */
749 #define A1_OPCODE_PUSH 0x092d0000
750 #define A2_OPCODE_PUSH 0x052d0004
751 #define A2_OPCODE_POP 0x049d0004
752
753 /* Codes to distinguish the arithmetic instructions. */
754 #define OPCODE_AND 0
755 #define OPCODE_EOR 1
756 #define OPCODE_SUB 2
757 #define OPCODE_RSB 3
758 #define OPCODE_ADD 4
759 #define OPCODE_ADC 5
760 #define OPCODE_SBC 6
761 #define OPCODE_RSC 7
762 #define OPCODE_TST 8
763 #define OPCODE_TEQ 9
764 #define OPCODE_CMP 10
765 #define OPCODE_CMN 11
766 #define OPCODE_ORR 12
767 #define OPCODE_MOV 13
768 #define OPCODE_BIC 14
769 #define OPCODE_MVN 15
770
771 #define T2_OPCODE_AND 0
772 #define T2_OPCODE_BIC 1
773 #define T2_OPCODE_ORR 2
774 #define T2_OPCODE_ORN 3
775 #define T2_OPCODE_EOR 4
776 #define T2_OPCODE_ADD 8
777 #define T2_OPCODE_ADC 10
778 #define T2_OPCODE_SBC 11
779 #define T2_OPCODE_SUB 13
780 #define T2_OPCODE_RSB 14
781
782 #define T_OPCODE_MUL 0x4340
783 #define T_OPCODE_TST 0x4200
784 #define T_OPCODE_CMN 0x42c0
785 #define T_OPCODE_NEG 0x4240
786 #define T_OPCODE_MVN 0x43c0
787
788 #define T_OPCODE_ADD_R3 0x1800
789 #define T_OPCODE_SUB_R3 0x1a00
790 #define T_OPCODE_ADD_HI 0x4400
791 #define T_OPCODE_ADD_ST 0xb000
792 #define T_OPCODE_SUB_ST 0xb080
793 #define T_OPCODE_ADD_SP 0xa800
794 #define T_OPCODE_ADD_PC 0xa000
795 #define T_OPCODE_ADD_I8 0x3000
796 #define T_OPCODE_SUB_I8 0x3800
797 #define T_OPCODE_ADD_I3 0x1c00
798 #define T_OPCODE_SUB_I3 0x1e00
799
800 #define T_OPCODE_ASR_R 0x4100
801 #define T_OPCODE_LSL_R 0x4080
802 #define T_OPCODE_LSR_R 0x40c0
803 #define T_OPCODE_ROR_R 0x41c0
804 #define T_OPCODE_ASR_I 0x1000
805 #define T_OPCODE_LSL_I 0x0000
806 #define T_OPCODE_LSR_I 0x0800
807
808 #define T_OPCODE_MOV_I8 0x2000
809 #define T_OPCODE_CMP_I8 0x2800
810 #define T_OPCODE_CMP_LR 0x4280
811 #define T_OPCODE_MOV_HR 0x4600
812 #define T_OPCODE_CMP_HR 0x4500
813
814 #define T_OPCODE_LDR_PC 0x4800
815 #define T_OPCODE_LDR_SP 0x9800
816 #define T_OPCODE_STR_SP 0x9000
817 #define T_OPCODE_LDR_IW 0x6800
818 #define T_OPCODE_STR_IW 0x6000
819 #define T_OPCODE_LDR_IH 0x8800
820 #define T_OPCODE_STR_IH 0x8000
821 #define T_OPCODE_LDR_IB 0x7800
822 #define T_OPCODE_STR_IB 0x7000
823 #define T_OPCODE_LDR_RW 0x5800
824 #define T_OPCODE_STR_RW 0x5000
825 #define T_OPCODE_LDR_RH 0x5a00
826 #define T_OPCODE_STR_RH 0x5200
827 #define T_OPCODE_LDR_RB 0x5c00
828 #define T_OPCODE_STR_RB 0x5400
829
830 #define T_OPCODE_PUSH 0xb400
831 #define T_OPCODE_POP 0xbc00
832
833 #define T_OPCODE_BRANCH 0xe000
834
835 #define THUMB_SIZE 2 /* Size of thumb instruction. */
836 #define THUMB_PP_PC_LR 0x0100
837 #define THUMB_LOAD_BIT 0x0800
838 #define THUMB2_LOAD_BIT 0x00100000
839
/* Diagnostic messages assigned to inst.error when an instruction is
   rejected.  Note: these expansions must NOT end in a semicolon, so
   that they can be used inside larger expressions (the old definition
   of BAD_ADDR_MODE carried a stray trailing ';').  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP          _("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_BRANCH_OFF	_("branch out of range or not a multiple of 2")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define BAD_FP16	_("selected processor does not support fp16 instruction")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY	_("relocation valid in thumb1 code only")
864
/* Hash tables used while assembling; keyed by name and populated
   elsewhere in this file.  */
static struct hash_control * arm_ops_hsh;
static struct hash_control * arm_cond_hsh;
static struct hash_control * arm_shift_hsh;
static struct hash_control * arm_psr_hsh;
static struct hash_control * arm_v7m_psr_hsh;
static struct hash_control * arm_reg_hsh;
static struct hash_control * arm_reloc_hsh;
static struct hash_control * arm_barrier_opt_hsh;
873
/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
	      <insn>  */

symbolS *  last_label_seen;
static int label_is_thumb_function_name = FALSE;

/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  /* The constants collected for this pool.  */
  expressionS	         literals [MAX_LITERAL_POOL_SIZE];
  /* Index of the first unused slot in LITERALS.  */
  unsigned int	         next_free_entry;
  unsigned int	         id;
  symbolS *	         symbol;
  /* Section/subsection the pool belongs to.  */
  segT		         section;
  subsegT	         sub_section;
#ifdef OBJ_ELF
  /* Source locations, parallel to LITERALS, for debug info.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  struct literal_pool *  next;
  unsigned int		 alignment;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;

/* States of the .asmfunc/.endasmfunc directive pair.  */
typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,
  WAITING_ASMFUNC_NAME,
  WAITING_ENDASMFUNC
} asmfunc_states;

static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
916
#ifdef OBJ_ELF
/* For ELF, IT-block state lives in the per-segment info so that section
   switches do not corrupt an open IT block.  */
# define now_it seg_info (now_seg)->tc_segment_info_data.current_it
#else
static struct current_it now_it;
#endif

/* Return non-zero if condition COND can appear inside the current IT
   block: it must match the block's condition ignoring the low (T/E)
   bit, which distinguishes a condition from its inverse.  */
static inline int
now_it_compatible (int cond)
{
  return (cond & ~1) == (now_it.cc & ~1);
}

/* Return non-zero if the instruction currently being assembled carries
   an explicit condition code.  */
static inline int
conditional_insn (void)
{
  return inst.cond != COND_ALWAYS;
}
934
static int in_it_block (void);

static int handle_it_state (void);

static void force_automatic_it_block_close (void);

static void it_fsm_post_encode (void);

/* Record the IT-handling category of the current instruction and run
   the IT state machine; on failure, return from the calling (void)
   function immediately.  */
#define set_it_insn_type(type)			\
  do						\
    {						\
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return;					\
    }						\
  while (0)

/* As set_it_insn_type, but for callers that must return FAILRET on
   failure.  */
#define set_it_insn_type_nonvoid(type, failret) \
  do						\
    {						\
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return failret;				\
    }						\
  while(0)
960
961 #define set_it_insn_type_last() \
962 do \
963 { \
964 if (inst.cond == COND_ALWAYS) \
965 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
966 else \
967 set_it_insn_type (INSIDE_IT_LAST_INSN); \
968 } \
969 while (0)
970
/* Pure syntax.	 */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.	 */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.	*/
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.	*/
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or	 0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

/* Note: skips at most ONE space character (an 'if', not a 'while').  */
#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
1005
1006 static inline int
1007 skip_past_char (char ** str, char c)
1008 {
1009 /* PR gas/14987: Allow for whitespace before the expected character. */
1010 skip_whitespace (*str);
1011
1012 if (**str == c)
1013 {
1014 (*str)++;
1015 return SUCCESS;
1016 }
1017 else
1018 return FAIL;
1019 }
1020
1021 #define skip_past_comma(str) skip_past_char (str, ',')
1022
1023 /* Arithmetic expressions (possibly involving symbols). */
1024
1025 /* Return TRUE if anything in the expression is a bignum. */
1026
1027 static bfd_boolean
1028 walk_no_bignums (symbolS * sp)
1029 {
1030 if (symbol_get_value_expression (sp)->X_op == O_big)
1031 return TRUE;
1032
1033 if (symbol_get_value_expression (sp)->X_add_symbol)
1034 {
1035 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
1036 || (symbol_get_value_expression (sp)->X_op_symbol
1037 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
1038 }
1039
1040 return FALSE;
1041 }
1042
/* Set while my_get_expression is running; md_operand uses it to flag
   operands the generic parser could not handle.  */
static bfd_boolean in_my_get_expression = FALSE;

/* Third argument to my_get_expression.	 */
#define GE_NO_PREFIX 0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3

/* Parse an expression from *STR into EP and advance *STR past it.
   PREFIX_MODE (one of the GE_* values) says whether a '#'/'$' immediate
   prefix is forbidden, required, or optional; in unified syntax every
   prefix is optional.  Returns SUCCESS, or non-zero with inst.error set
   on failure.  (Note the failure value is FAIL on the missing-prefix
   path but 1 on the others; callers test for non-zero.)  */

static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Point the generic expression parser at our text; md_operand () will
     mark bad operands while in_my_get_expression is set.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = TRUE;
  expression (ep);
  in_my_get_expression = FALSE;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  /* Restore the global parser pointer before returning.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return SUCCESS;
}
1122
1123 /* Turn a string in input_line_pointer into a floating point constant
1124 of type TYPE, and store the appropriate bytes in *LITP. The number
1125 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1126 returned, or NULL on OK.
1127
   Note that fp constants aren't represented in the normal way on the ARM.
1129 In big endian mode, things are as expected. However, in little endian
1130 mode fp constants are big-endian word-wise, and little-endian byte-wise
1131 within the words. For example, (double) 1.1 in big endian mode is
1132 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1133 the byte sequence 99 99 f1 3f 9a 99 99 99.
1134
1135 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1136
/* Convert the floating-point literal at input_line_pointer into
   target bytes at LITP.  TYPE is the gas float-type letter ('f', 'd',
   'x', ...); *SIZEP receives the number of bytes emitted.  Returns an
   error message, or NULL on success.  See the comment above for the
   ARM's mixed-endian double layout.  */

const char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  /* Map the type letter to a littlenum count.  */
  switch (type)
    {
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;
      break;

    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;
      break;

    case 'x':
    case 'X':
      prec = 5;
      break;

    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  if (target_big_endian)
    {
      /* Big endian: emit littlenums most-significant first.  */
      for (i = 0; i < prec; i++)
	{
	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	  litP += sizeof (LITTLENUM_TYPE);
	}
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	/* Pure little-endian FPU: emit littlenums least-significant
	   first.  */
	for (i = prec - 1; i >= 0; i--)
	  {
	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += sizeof (LITTLENUM_TYPE);
	  }
      else
	/* For a 4 byte float the order of elements in `words' is 1 0.
	   For an 8 byte float the order is 1 0 3 2.  */
	for (i = 0; i < prec; i += 2)
	  {
	    md_number_to_chars (litP, (valueT) words[i + 1],
				sizeof (LITTLENUM_TYPE));
	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
				(valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += 2 * sizeof (LITTLENUM_TYPE);
	  }
    }

  return NULL;
}
1212
1213 /* We handle all bad expressions here, so that we can report the faulty
1214 instruction in the error message. */
1215
1216 void
1217 md_operand (expressionS * exp)
1218 {
1219 if (in_my_get_expression)
1220 exp->X_op = O_illegal;
1221 }
1222
/* Immediate values.  */

#ifdef OBJ_ELF
/* Generic immediate-value read function for use in directives.
   Accepts anything that 'expression' can fold to a constant.
   *val receives the number.  */

static int
immediate_for_directive (int *val)
{
  expressionS exp;
  /* Pre-mark as illegal: if no '#'/'$' prefix is present, the
     expression is never parsed and the check below fails.  */
  exp.X_op = O_illegal;

  if (is_immediate_prefix (*input_line_pointer))
    {
      input_line_pointer++;
      expression (&exp);
    }

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected #constant"));
      ignore_rest_of_line ();
      return FAIL;
    }
  *val = exp.X_add_number;
  return SUCCESS;
}
#endif
1252
/* Register parsing.  */

/* Generic register parser.  CCP points to what should be the
   beginning of a register name.  If it is indeed a valid register
   name, advance CCP over it and return the reg_entry structure;
   otherwise return NULL.  Does not issue diagnostics.	*/

static struct reg_entry *
arm_reg_parse_multi (char **ccp)
{
  char *start = *ccp;
  char *p;
  struct reg_entry *reg;

  skip_whitespace (start);

#ifdef REGISTER_PREFIX
  /* Mandatory register prefix (e.g. '%').  */
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif
#ifdef OPTIONAL_REGISTER_PREFIX
  if (*start == OPTIONAL_REGISTER_PREFIX)
    start++;
#endif

  /* Scan an identifier: leading alphabetic, then alphanumerics and
     underscores.  */
  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  /* Only consume the input on a successful lookup.  */
  *ccp = p;
  return reg;
}
1295
/* Try the alternative syntaxes accepted for a few register classes.
   START is where parsing began (used for the bare-number form); REG is
   the entry found by the generic parser, or NULL.  Returns the register
   number, or FAIL if no alternative applies.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1334
1335 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1336 return value is the register number or FAIL. */
1337
1338 static int
1339 arm_reg_parse (char **ccp, enum arm_reg_type type)
1340 {
1341 char *start = *ccp;
1342 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1343 int ret;
1344
1345 /* Do not allow a scalar (reg+index) to parse as a register. */
1346 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1347 return FAIL;
1348
1349 if (reg && reg->type == type)
1350 return reg->number;
1351
1352 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1353 return ret;
1354
1355 *ccp = start;
1356 return FAIL;
1357 }
1358
/* Parse a Neon type specifier. *STR should point at the leading '.'
   character. Does no verification at this stage that the type fits the opcode
   properly. E.g.,

     .i32.i32.s16
     .s32.f32
     .u16

   Can all be legally parsed by this function.

   Fills in neon_type struct pointer with parsed information, and updates STR
   to point after the parsed type specifier. Returns SUCCESS if this was a legal
   type, FAIL if not.  */

static int
parse_neon_type (struct neon_type *type, char **str)
{
  char *ptr = *str;

  /* NOTE(review): this guard implies TYPE may be NULL, yet the loop
     condition below and the final elems check dereference TYPE
     unconditionally.  All callers visible in this file pass a non-null
     TYPE; confirm before ever passing NULL.  */
  if (type)
    type->elems = 0;

  while (type->elems < NEON_MAX_TYPE_ELS)
    {
      enum neon_el_type thistype = NT_untyped;
      unsigned thissize = -1u;

      /* Each element starts with '.'.  */
      if (*ptr != '.')
	break;

      ptr++;

      /* Just a size without an explicit type.  */
      if (ISDIGIT (*ptr))
	goto parsesize;

      switch (TOLOWER (*ptr))
	{
	case 'i': thistype = NT_integer; break;
	case 'f': thistype = NT_float; break;
	case 'p': thistype = NT_poly; break;
	case 's': thistype = NT_signed; break;
	case 'u': thistype = NT_unsigned; break;
	case 'd':
	  /* ".d" is shorthand for a 64-bit float.  */
	  thistype = NT_float;
	  thissize = 64;
	  ptr++;
	  goto done;
	default:
	  as_bad (_("unexpected character `%c' in type specifier"), *ptr);
	  return FAIL;
	}

      ptr++;

      /* .f is an abbreviation for .f32.  */
      if (thistype == NT_float && !ISDIGIT (*ptr))
	thissize = 32;
      else
	{
	parsesize:
	  thissize = strtoul (ptr, &ptr, 10);

	  if (thissize != 8 && thissize != 16 && thissize != 32
	      && thissize != 64)
	    {
	      as_bad (_("bad size %d in type specifier"), thissize);
	      return FAIL;
	    }
	}

    done:
      if (type)
	{
	  type->el[type->elems].type = thistype;
	  type->el[type->elems].size = thissize;
	  type->elems++;
	}
    }

  /* Empty/missing type is not a successful parse.  */
  if (type->elems == 0)
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
1447
1448 /* Errors may be set multiple times during parsing or bit encoding
1449 (particularly in the Neon bits), but usually the earliest error which is set
1450 will be the most meaningful. Avoid overwriting it with later (cascading)
1451 errors by calling this function. */
1452
1453 static void
1454 first_error (const char *err)
1455 {
1456 if (!inst.error)
1457 inst.error = err;
1458 }
1459
1460 /* Parse a single type, e.g. ".s32", leading period included. */
1461 static int
1462 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1463 {
1464 char *str = *ccp;
1465 struct neon_type optype;
1466
1467 if (*str == '.')
1468 {
1469 if (parse_neon_type (&optype, &str) == SUCCESS)
1470 {
1471 if (optype.elems == 1)
1472 *vectype = optype.el[0];
1473 else
1474 {
1475 first_error (_("only one type should be specified for operand"));
1476 return FAIL;
1477 }
1478 }
1479 else
1480 {
1481 first_error (_("vector type expected"));
1482 return FAIL;
1483 }
1484 }
1485 else
1486 return FAIL;
1487
1488 *ccp = str;
1489
1490 return SUCCESS;
1491 }
1492
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES		15
#define NEON_INTERLEAVE_LANES	14

/* Parse either a register or a scalar, with an optional type. Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  Returns FAIL if no register of
   an acceptable type is found.  */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with "no type, no index".  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_NSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* Inherit any type/index attached to the register alias itself.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix overrides nothing: it may not conflict
     with a type the alias already carries.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional scalar index: "[]" means all lanes, "[n]" a single lane.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD
	  && !(type == REG_TYPE_VFS
	       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2)))
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1611
/* Like arm_reg_parse, but allow the following extra features:
1613 - If RTYPE is non-zero, return the (possibly restricted) type of the
1614 register (e.g. Neon double or quad reg when either has been requested).
1615 - If this is a Neon vector type with additional type information, fill
1616 in the struct pointed to by VECTYPE (if non-NULL).
1617 This function will fault on encountering a scalar. */
1618
1619 static int
1620 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1621 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1622 {
1623 struct neon_typed_alias atype;
1624 char *str = *ccp;
1625 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1626
1627 if (reg == FAIL)
1628 return FAIL;
1629
1630 /* Do not allow regname(... to parse as a register. */
1631 if (*str == '(')
1632 return FAIL;
1633
1634 /* Do not allow a scalar (reg+index) to parse as a register. */
1635 if ((atype.defined & NTA_HASINDEX) != 0)
1636 {
1637 first_error (_("register operand expected, but got scalar"));
1638 return FAIL;
1639 }
1640
1641 if (vectype)
1642 *vectype = atype.eltype;
1643
1644 *ccp = str;
1645
1646 return reg;
1647 }
1648
1649 #define NEON_SCALAR_REG(X) ((X) >> 4)
1650 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1651
1652 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1653 have enough information to be able to do a good job bounds-checking. So, we
1654 just do easy checks here, and do further checks later. */
1655
1656 static int
1657 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1658 {
1659 int reg;
1660 char *str = *ccp;
1661 struct neon_typed_alias atype;
1662 enum arm_reg_type reg_type = REG_TYPE_VFD;
1663
1664 if (elsize == 4)
1665 reg_type = REG_TYPE_VFS;
1666
1667 reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);
1668
1669 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1670 return FAIL;
1671
1672 if (atype.index == NEON_ALL_LANES)
1673 {
1674 first_error (_("scalar must have an index"));
1675 return FAIL;
1676 }
1677 else if (atype.index >= 64 / elsize)
1678 {
1679 first_error (_("scalar index out of range"));
1680 return FAIL;
1681 }
1682
1683 if (type)
1684 *type = atype.eltype;
1685
1686 *ccp = str;
1687
1688 return reg * 16 + atype.index;
1689 }
1690
/* Types of registers in a list.  REGLIST_RN/REGLIST_CLRM are core
   register lists (CLRM additionally accepting APSR but not SP/PC);
   the others are floating-point/Neon register lists.  */

enum reg_list_els
{
  REGLIST_RN,
  REGLIST_CLRM,
  REGLIST_VFP_S,
  REGLIST_VFP_D,
  REGLIST_NEON_D
};
1701
1702 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1703
1704 static long
1705 parse_reg_list (char ** strp, enum reg_list_els etype)
1706 {
1707 char *str = *strp;
1708 long range = 0;
1709 int another_range;
1710
1711 gas_assert (etype == REGLIST_RN || etype == REGLIST_CLRM);
1712
1713 /* We come back here if we get ranges concatenated by '+' or '|'. */
1714 do
1715 {
1716 skip_whitespace (str);
1717
1718 another_range = 0;
1719
1720 if (*str == '{')
1721 {
1722 int in_range = 0;
1723 int cur_reg = -1;
1724
1725 str++;
1726 do
1727 {
1728 int reg;
1729 const char apsr_str[] = "apsr";
1730 int apsr_str_len = strlen (apsr_str);
1731
1732 reg = arm_reg_parse (&str, REGLIST_RN);
1733 if (etype == REGLIST_CLRM)
1734 {
1735 if (reg == REG_SP || reg == REG_PC)
1736 reg = FAIL;
1737 else if (reg == FAIL
1738 && !strncasecmp (str, apsr_str, apsr_str_len)
1739 && !ISALPHA (*(str + apsr_str_len)))
1740 {
1741 reg = 15;
1742 str += apsr_str_len;
1743 }
1744
1745 if (reg == FAIL)
1746 {
1747 first_error (_("r0-r12, lr or APSR expected"));
1748 return FAIL;
1749 }
1750 }
1751 else /* etype == REGLIST_RN. */
1752 {
1753 if (reg == FAIL)
1754 {
1755 first_error (_(reg_expected_msgs[REGLIST_RN]));
1756 return FAIL;
1757 }
1758 }
1759
1760 if (in_range)
1761 {
1762 int i;
1763
1764 if (reg <= cur_reg)
1765 {
1766 first_error (_("bad range in register list"));
1767 return FAIL;
1768 }
1769
1770 for (i = cur_reg + 1; i < reg; i++)
1771 {
1772 if (range & (1 << i))
1773 as_tsktsk
1774 (_("Warning: duplicated register (r%d) in register list"),
1775 i);
1776 else
1777 range |= 1 << i;
1778 }
1779 in_range = 0;
1780 }
1781
1782 if (range & (1 << reg))
1783 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1784 reg);
1785 else if (reg <= cur_reg)
1786 as_tsktsk (_("Warning: register range not in ascending order"));
1787
1788 range |= 1 << reg;
1789 cur_reg = reg;
1790 }
1791 while (skip_past_comma (&str) != FAIL
1792 || (in_range = 1, *str++ == '-'));
1793 str--;
1794
1795 if (skip_past_char (&str, '}') == FAIL)
1796 {
1797 first_error (_("missing `}'"));
1798 return FAIL;
1799 }
1800 }
1801 else if (etype == REGLIST_RN)
1802 {
1803 expressionS exp;
1804
1805 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1806 return FAIL;
1807
1808 if (exp.X_op == O_constant)
1809 {
1810 if (exp.X_add_number
1811 != (exp.X_add_number & 0x0000ffff))
1812 {
1813 inst.error = _("invalid register mask");
1814 return FAIL;
1815 }
1816
1817 if ((range & exp.X_add_number) != 0)
1818 {
1819 int regno = range & exp.X_add_number;
1820
1821 regno &= -regno;
1822 regno = (1 << regno) - 1;
1823 as_tsktsk
1824 (_("Warning: duplicated register (r%d) in register list"),
1825 regno);
1826 }
1827
1828 range |= exp.X_add_number;
1829 }
1830 else
1831 {
1832 if (inst.relocs[0].type != 0)
1833 {
1834 inst.error = _("expression too complex");
1835 return FAIL;
1836 }
1837
1838 memcpy (&inst.relocs[0].exp, &exp, sizeof (expressionS));
1839 inst.relocs[0].type = BFD_RELOC_ARM_MULTI;
1840 inst.relocs[0].pc_rel = 0;
1841 }
1842 }
1843
1844 if (*str == '|' || *str == '+')
1845 {
1846 str++;
1847 another_range = 1;
1848 }
1849 }
1850 while (another_range);
1851
1852 *strp = str;
1853 return range;
1854 }
1855
/* Parse a VFP register list.  If the string is invalid return FAIL.
   Otherwise return the number of registers, and set PBASE to the first
   register.  Parses registers of type ETYPE.
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
     - Q registers can be used to specify pairs of D registers
     - { } can be omitted from around a singleton register list
	 FIXME: This is not implemented, as it would require backtracking in
	 some cases, e.g.:
	   vtbl.8 d3,d4,d5
	 This could be done (the meaning isn't really ambiguous), but doesn't
	 fit in well with the current parsing framework.
     - 32 D registers may be used (also true for VFPv3).
   FIXME: Types are ignored in these register lists, which is probably a
   bug.  */

static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Choose the register class and (for S registers) the limit.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;

    default:
      gas_assert (0);
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that the D32 extension was actually used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Add every register in the range to the mask.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): this unconditionally steps past the list terminator
     without checking that it really is '}' — presumably a malformed
     list has already failed above; confirm.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
2037
2038 /* True if two alias types are the same. */
2039
2040 static bfd_boolean
2041 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
2042 {
2043 if (!a && !b)
2044 return TRUE;
2045
2046 if (!a || !b)
2047 return FALSE;
2048
2049 if (a->defined != b->defined)
2050 return FALSE;
2051
2052 if ((a->defined & NTA_HASTYPE) != 0
2053 && (a->eltype.type != b->eltype.type
2054 || a->eltype.size != b->eltype.size))
2055 return FALSE;
2056
2057 if ((a->defined & NTA_HASINDEX) != 0
2058 && (a->index != b->index))
2059 return FALSE;
2060
2061 return TRUE;
2062 }
2063
2064 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2065 The base register is put in *PBASE.
2066 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2067 the return value.
2068 The register stride (minus one) is put in bit 4 of the return value.
2069 Bits [6:5] encode the list length (minus one).
2070 The type of the list elements is put in *ELTYPE, if non-NULL. */
2071
2072 #define NEON_LANE(X) ((X) & 0xf)
2073 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
2074 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2075
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;	/* First register in the list; -1 = not seen yet.  */
  int reg_incr = -1;	/* Register stride; -1 = not yet determined.  */
  int count = 0;	/* Number of D registers consumed so far.  */
  int lane = -1;	/* Lane index, or one of the NEON_*_LANES values.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First element: it fixes the base register, the element type
	     that all later entries must match, and (for Q registers) the
	     stride.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second element: the distance from the base determines the
	     stride for the rest of the list.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Later elements must continue the established progression.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  /* A Q register covers two D registers.  */
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register in the range, inclusive.  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All indexed entries must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  /* Indexed and non-indexed entries cannot be mixed.  */
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack result as documented above: lane in [3:0], stride-1 in bit 4,
     length-1 in [6:5].  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2232
2233 /* Parse an explicit relocation suffix on an expression. This is
2234 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2235 arm_reloc_hsh contains no entries, so this function can only
2236 succeed if there is no () after the word. Returns -1 on error,
2237 BFD_RELOC_UNUSED if there wasn't any suffix. */
2238
2239 static int
2240 parse_reloc (char **str)
2241 {
2242 struct reloc_entry *r;
2243 char *p, *q;
2244
2245 if (**str != '(')
2246 return BFD_RELOC_UNUSED;
2247
2248 p = *str + 1;
2249 q = p;
2250
2251 while (*q && *q != ')' && *q != ',')
2252 q++;
2253 if (*q != ')')
2254 return -1;
2255
2256 if ((r = (struct reloc_entry *)
2257 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2258 return -1;
2259
2260 *str = q + 1;
2261 return r->reloc;
2262 }
2263
2264 /* Directives: register aliases. */
2265
2266 static struct reg_entry *
2267 insert_reg_alias (char *str, unsigned number, int type)
2268 {
2269 struct reg_entry *new_reg;
2270 const char *name;
2271
2272 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2273 {
2274 if (new_reg->builtin)
2275 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2276
2277 /* Only warn about a redefinition if it's not defined as the
2278 same register. */
2279 else if (new_reg->number != number || new_reg->type != type)
2280 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2281
2282 return NULL;
2283 }
2284
2285 name = xstrdup (str);
2286 new_reg = XNEW (struct reg_entry);
2287
2288 new_reg->name = name;
2289 new_reg->number = number;
2290 new_reg->type = type;
2291 new_reg->builtin = FALSE;
2292 new_reg->neon = NULL;
2293
2294 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2295 abort ();
2296
2297 return new_reg;
2298 }
2299
2300 static void
2301 insert_neon_reg_alias (char *str, int number, int type,
2302 struct neon_typed_alias *atype)
2303 {
2304 struct reg_entry *reg = insert_reg_alias (str, number, type);
2305
2306 if (!reg)
2307 {
2308 first_error (_("attempt to redefine typed alias"));
2309 return;
2310 }
2311
2312 if (atype)
2313 {
2314 reg->neon = XNEW (struct neon_typed_alias);
2315 *reg->neon = *atype;
2316 }
2317 }
2318
2319 /* Look for the .req directive. This is of the form:
2320
2321 new_register_name .req existing_register_name
2322
2323 If we find one, or if it looks sufficiently like one that we want to
2324 handle any error here, return TRUE. Otherwise return FALSE. */
2325
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      /* Still return TRUE: the line was recognisably a .req, so the
	 caller should not try to parse it as anything else.  */
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the case-folded variants if they actually differ
	 from the name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
2398
2399 /* Create a Neon typed/indexed register alias using directives, e.g.:
2400 X .dn d5.s32[1]
2401 Y .qn 6.s16
2402 Z .dn d7
2403 T .dn Z[0]
2404 These typed registers can be used instead of the types specified after the
2405 Neon mnemonic, so long as all operands given have types. Types can also be
2406 specified directly, e.g.:
2407 vadd d0.s32, d1.s32, d2.s32 */
2408
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;	/* Used when the base is a bare number.  */
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* .dn defines a D-register alias, .qn a Q-register alias.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      basereg = &mybasereg;
      /* A bare Q-register number is converted to its D-register
	 equivalent (each Q register spans two D registers).  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Start from any type/index the base alias already carries.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2547
2548 /* Should never be called, as .req goes between the alias and the
2549 register name, not at the beginning of the line. */
2550
/* Handler for a leading ".req": always an error, since a valid .req
   is written as "alias .req reg" and is caught by create_register_alias
   before directive dispatch.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
2556
/* Handler for a leading ".dn": always an error; the valid form
   "alias .dn reg" is handled by create_neon_reg_alias.  */
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2562
/* Handler for a leading ".qn": always an error; the valid form
   "alias .qn reg" is handled by create_neon_reg_alias.  */
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2568
2569 /* The .unreq directive deletes an alias which was previously defined
2570 by .req. For example:
2571
2572 my_alias .req r11
2573 .unreq my_alias */
2574
2575 static void
2576 s_unreq (int a ATTRIBUTE_UNUSED)
2577 {
2578 char * name;
2579 char saved_char;
2580
2581 name = input_line_pointer;
2582
2583 while (*input_line_pointer != 0
2584 && *input_line_pointer != ' '
2585 && *input_line_pointer != '\n')
2586 ++input_line_pointer;
2587
2588 saved_char = *input_line_pointer;
2589 *input_line_pointer = 0;
2590
2591 if (!*name)
2592 as_bad (_("invalid syntax for .unreq directive"));
2593 else
2594 {
2595 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2596 name);
2597
2598 if (!reg)
2599 as_bad (_("unknown register alias '%s'"), name);
2600 else if (reg->builtin)
2601 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2602 name);
2603 else
2604 {
2605 char * p;
2606 char * nbuf;
2607
2608 hash_delete (arm_reg_hsh, name, FALSE);
2609 free ((char *) reg->name);
2610 if (reg->neon)
2611 free (reg->neon);
2612 free (reg);
2613
2614 /* Also locate the all upper case and all lower case versions.
2615 Do not complain if we cannot find one or the other as it
2616 was probably deleted above. */
2617
2618 nbuf = strdup (name);
2619 for (p = nbuf; *p; p++)
2620 *p = TOUPPER (*p);
2621 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2622 if (reg)
2623 {
2624 hash_delete (arm_reg_hsh, nbuf, FALSE);
2625 free ((char *) reg->name);
2626 if (reg->neon)
2627 free (reg->neon);
2628 free (reg);
2629 }
2630
2631 for (p = nbuf; *p; p++)
2632 *p = TOLOWER (*p);
2633 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2634 if (reg)
2635 {
2636 hash_delete (arm_reg_hsh, nbuf, FALSE);
2637 free ((char *) reg->name);
2638 if (reg->neon)
2639 free (reg->neon);
2640 free (reg);
2641 }
2642
2643 free (nbuf);
2644 }
2645 }
2646
2647 *input_line_pointer = saved_char;
2648 demand_empty_rest_of_line ();
2649 }
2650
2651 /* Directives: Instruction set selection. */
2652
2653 #ifdef OBJ_ELF
2654 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2655 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2656 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2657 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2658
2659 /* Create a new mapping symbol for the transition to STATE. */
2660
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the AAELF mapping symbol name: $d = data, $a = ARM code,
     $t = Thumb code.  All three are untyped (see comment above).  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mark code mapping symbols with the corresponding ARM/Thumb and
     interworking attributes; $d carries none.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2734
2735 /* We must sometimes convert a region marked as code to data during
2736 code alignment, if an odd number of bytes have to be padded. The
2737 code mapping symbol is pushed to an aligned address. */
2738
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* Removing the symbol at offset 0 also invalidates the
	     frag's first_map, which must have been that same symbol.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark [value, value + bytes) as data, then resume STATE after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2761
2762 static void mapping_state_2 (enum mstate state, int max_chars);
2763
2764 /* Set the mapping state to STATE. Only call this when about to
2765 emit some STATE bytes to the file. */
2766
2767 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to me marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  /* Emit the mapping symbol at the current position (0 bytes back).  */
  mapping_state_2 (state, 0);
}
2800
2801 /* Same as mapping_state, but MAX_CHARS bytes have already been
2802 allocated. Put the mapping symbol that far back. */
2803
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only ordinary sections get mapping symbols.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      /* If anything was already emitted before this first code symbol,
	 retroactively mark the start of the section as data.  */
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  /* Place the symbol MAX_CHARS bytes before the current position, i.e.
     at the start of the already-allocated output.  */
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2830 #undef TRANSITION
2831 #else
2832 #define mapping_state(x) ((void)0)
2833 #define mapping_state_2(x, y) ((void)0)
2834 #endif
2835
2836 /* Find the real, Thumb encoded start of a Thumb function. */
2837
2838 #ifdef OBJ_COFF
2839 static symbolS *
2840 find_real_start (symbolS * symbolP)
2841 {
2842 char * real_start;
2843 const char * name = S_GET_NAME (symbolP);
2844 symbolS * new_target;
2845
2846 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2847 #define STUB_NAME ".real_start_of"
2848
2849 if (name == NULL)
2850 abort ();
2851
2852 /* The compiler may generate BL instructions to local labels because
2853 it needs to perform a branch to a far away location. These labels
2854 do not have a corresponding ".real_start_of" label. We check
2855 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2856 the ".real_start_of" convention for nonlocal branches. */
2857 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2858 return symbolP;
2859
2860 real_start = concat (STUB_NAME, name, NULL);
2861 new_target = symbol_find (real_start);
2862 free (real_start);
2863
2864 if (new_target == NULL)
2865 {
2866 as_warn (_("Failed to find real start of function: %s\n"), name);
2867 new_target = symbolP;
2868 }
2869
2870 return new_target;
2871 }
2872 #endif
2873
/* Switch the assembler to Thumb (WIDTH == 16) or ARM (WIDTH == 32)
   encoding, diagnosing processors that lack the requested instruction
   set.  Any other WIDTH is an error.  */

static void
opcode_select (int width)
{
  switch (width)
    {
    case 16:
      if (! thumb_mode)
	{
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	    as_bad (_("selected processor does not support THUMB opcodes"));

	  thumb_mode = 1;
	  /* No need to force the alignment, since we will have been
	     coming from ARM mode, which is word-aligned.  */
	  record_alignment (now_seg, 1);
	}
      break;

    case 32:
      if (thumb_mode)
	{
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
	    as_bad (_("selected processor does not support ARM opcodes"));

	  thumb_mode = 0;

	  /* ARM instructions need word alignment; pad if necessary.  */
	  if (!need_pass_2)
	    frag_align (2, 0, 0);

	  record_alignment (now_seg, 1);
	}
      break;

    default:
      as_bad (_("invalid instruction size selected (%d)"), width);
    }
}
2911
/* Handle the .arm directive: switch to 32-bit ARM encoding.  */
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2918
/* Handle the .thumb directive: switch to 16-bit Thumb encoding.  */
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2925
2926 static void
2927 s_code (int unused ATTRIBUTE_UNUSED)
2928 {
2929 int temp;
2930
2931 temp = get_absolute_expression ();
2932 switch (temp)
2933 {
2934 case 16:
2935 case 32:
2936 opcode_select (temp);
2937 break;
2938
2939 default:
2940 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2941 }
2942 }
2943
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* NOTE(review): thumb_mode is set to 2 rather than 1 here,
	 presumably to mark the selection as forced — confirm against
	 the users of thumb_mode elsewhere in this file.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2960
/* Handle .thumb_func: switch to Thumb encoding and flag the next label
   as the name of a Thumb function.  */
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2970
2971 /* Perform a .set directive, but also mark the alias as
2972 being a thumb function. */
2973
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Temporarily NUL-terminate the name for the error message.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Undo the NUL-termination of the name.  */
  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
3059
3060 /* Directives: Mode selection. */
3061
3062 /* .syntax [unified|divided] - choose the new unified syntax
3063 (same for Arm and Thumb encoding, modulo slight differences in what
3064 can be represented) or the old divergent syntax for each mode. */
3065 static void
3066 s_syntax (int unused ATTRIBUTE_UNUSED)
3067 {
3068 char *name, delim;
3069
3070 delim = get_symbol_name (& name);
3071
3072 if (!strcasecmp (name, "unified"))
3073 unified_syntax = TRUE;
3074 else if (!strcasecmp (name, "divided"))
3075 unified_syntax = FALSE;
3076 else
3077 {
3078 as_bad (_("unrecognized syntax mode \"%s\""), name);
3079 return;
3080 }
3081 (void) restore_line_pointer (delim);
3082 demand_empty_rest_of_line ();
3083 }
3084
3085 /* Directives: sectioning and alignment. */
3086
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3099
/* Handle the .even directive: align to a 2-byte boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3111
3112 /* Directives: CodeComposer Studio. */
3113
3114 /* .ref (for CodeComposer Studio syntax only). */
3115 static void
3116 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3117 {
3118 if (codecomposer_syntax)
3119 ignore_rest_of_line ();
3120 else
3121 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3122 }
3123
3124 /* If name is not NULL, then it is used for marking the beginning of a
3125 function, whereas if it is NULL then it means the function end. */
static void
asmfunc_debug (const char * name)
{
  /* Remembers the function name between the .asmfunc call (NAME set)
     and the matching .endasmfunc call (NAME NULL).  */
  static const char * last_name = NULL;

  if (name != NULL)
    {
      /* Opening a function: none may already be open.  */
      gas_assert (last_name == NULL);
      last_name = name;

      if (debug_type == DEBUG_STABS)
	 stabs_generate_asm_func (name, name);
    }
  else
    {
      /* Closing a function: one must be open.  */
      gas_assert (last_name != NULL);

      if (debug_type == DEBUG_STABS)
	stabs_generate_asm_endfunc (last_name, last_name);

      last_name = NULL;
    }
}
3149
/* Handle .asmfunc (CodeComposer Studio): start waiting for the
   function-name label; diagnose unbalanced or repeated uses.  */
static void
s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    {
      switch (asmfunc_state)
	{
	case OUTSIDE_ASMFUNC:
	  asmfunc_state = WAITING_ASMFUNC_NAME;
	  break;

	case WAITING_ASMFUNC_NAME:
	  as_bad (_(".asmfunc repeated."));
	  break;

	case WAITING_ENDASMFUNC:
	  as_bad (_(".asmfunc without function."));
	  break;
	}
      demand_empty_rest_of_line ();
    }
  else
    as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
}
3174
/* Handle .endasmfunc (CodeComposer Studio): close the function opened
   by .asmfunc; diagnose unbalanced uses.  */
static void
s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    {
      switch (asmfunc_state)
	{
	case OUTSIDE_ASMFUNC:
	  as_bad (_(".endasmfunc without a .asmfunc."));
	  break;

	case WAITING_ASMFUNC_NAME:
	  as_bad (_(".endasmfunc without function."));
	  break;

	case WAITING_ENDASMFUNC:
	  asmfunc_state = OUTSIDE_ASMFUNC;
	  asmfunc_debug (NULL);
	  break;
	}
      demand_empty_rest_of_line ();
    }
  else
    as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
}
3200
3201 static void
3202 s_ccs_def (int name)
3203 {
3204 if (codecomposer_syntax)
3205 s_globl (name);
3206 else
3207 as_bad (_(".def pseudo-op only available with -mccs flag."));
3208 }
3209
3210 /* Directives: Literal pools. */
3211
3212 static literal_pool *
3213 find_literal_pool (void)
3214 {
3215 literal_pool * pool;
3216
3217 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3218 {
3219 if (pool->section == now_seg
3220 && pool->sub_section == now_subseg)
3221 break;
3222 }
3223
3224 return pool;
3225 }
3226
3227 static literal_pool *
3228 find_or_make_literal_pool (void)
3229 {
3230 /* Next literal pool ID number. */
3231 static unsigned int latest_pool_num = 1;
3232 literal_pool * pool;
3233
3234 pool = find_literal_pool ();
3235
3236 if (pool == NULL)
3237 {
3238 /* Create a new pool. */
3239 pool = XNEW (literal_pool);
3240 if (! pool)
3241 return NULL;
3242
3243 pool->next_free_entry = 0;
3244 pool->section = now_seg;
3245 pool->sub_section = now_subseg;
3246 pool->next = list_of_pools;
3247 pool->symbol = NULL;
3248 pool->alignment = 2;
3249
3250 /* Add it to the list. */
3251 list_of_pools = pool;
3252 }
3253
3254 /* New pools, and emptied pools, will have a NULL symbol. */
3255 if (pool->symbol == NULL)
3256 {
3257 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3258 (valueT) 0, &zero_address_frag);
3259 pool->id = latest_pool_num ++;
3260 }
3261
3262 /* Done. */
3263 return pool;
3264 }
3265
/* Add the literal in the global 'inst'
   structure to the relevant literal pool.  NBYTES is 4 for a word
   literal, 8 for a double-word literal (which is stored as two
   consecutive 4-byte entries).  On success, rewrites
   inst.relocs[0].exp to be <pool symbol> + <byte offset> and returns
   SUCCESS; on overflow or bad operand sets inst.error and returns
   FAIL.  */

static int
add_to_lit_pool (unsigned int nbytes)
{
/* X_md of a pool entry encodes the entry size in its low byte; bit 8
   (PADDING_SLOT) marks a 4-byte filler inserted for 8-byte alignment.  */
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  if (nbytes == 8)
    {
      /* Split the 64-bit value into two 32-bit halves up front; on a
	 32-bit host the value may live in generic_bignum, which later
	 expression parsing would overwrite.  */
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	      : inst.relocs[0].exp.X_unsigned ? 0
	      : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  /* Big-endian pools store the high word first.  */
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Reuse a matching 4-byte constant entry.  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.relocs[0].exp.X_unsigned))
	    break;

	  /* Reuse a matching symbolic entry (same symbol + addend).  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.relocs[0].exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.relocs[0].exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      else if ((nbytes == 8)
	       /* An 8-byte value can only be reused at an 8-byte-aligned
		  offset, as a pair of consecutive 4-byte entries.  */
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.relocs[0].exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.relocs[0].exp.X_unsigned))
	break;

      /* A padding slot can be reclaimed by a 4-byte literal.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.relocs[0].exp.X_op == O_constant
		|| inst.relocs[0].exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Pool is only 4-byte aligned here: insert a padding
		 slot so the 8-byte value lands on an 8-byte boundary.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.relocs[0].exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Emit the two 4-byte halves (imm1 low/first, imm2 second;
	     already swapped above for big-endian).  */
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  /* The whole pool now needs 8-byte alignment.  */
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Overwrite the reclaimed padding slot with the new literal.  */
      pool->literals[entry] = inst.relocs[0].exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's reloc to point at the pool entry.  */
  inst.relocs[0].exp.X_op = O_symbol;
  inst.relocs[0].exp.X_add_number = pool_size;
  inst.relocs[0].exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3430
3431 bfd_boolean
3432 tc_start_label_without_colon (void)
3433 {
3434 bfd_boolean ret = TRUE;
3435
3436 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3437 {
3438 const char *label = input_line_pointer;
3439
3440 while (!is_end_of_line[(int) label[-1]])
3441 --label;
3442
3443 if (*label == '.')
3444 {
3445 as_bad (_("Invalid label '%s'"), label);
3446 ret = FALSE;
3447 }
3448
3449 asmfunc_debug (label);
3450
3451 asmfunc_state = WAITING_ENDASMFUNC;
3452 }
3453
3454 return ret;
3455 }
3456
3457 /* Can't use symbol_new here, so have to create a symbol and then at
3458 a later date assign it a value. That's what these functions do. */
3459
3460 static void
3461 symbol_locate (symbolS * symbolP,
3462 const char * name, /* It is copied, the caller can modify. */
3463 segT segment, /* Segment identifier (SEG_<something>). */
3464 valueT valu, /* Symbol value. */
3465 fragS * frag) /* Associated fragment. */
3466 {
3467 size_t name_length;
3468 char * preserved_copy_of_name;
3469
3470 name_length = strlen (name) + 1; /* +1 for \0. */
3471 obstack_grow (&notes, name, name_length);
3472 preserved_copy_of_name = (char *) obstack_finish (&notes);
3473
3474 #ifdef tc_canonicalize_symbol_name
3475 preserved_copy_of_name =
3476 tc_canonicalize_symbol_name (preserved_copy_of_name);
3477 #endif
3478
3479 S_SET_NAME (symbolP, preserved_copy_of_name);
3480
3481 S_SET_SEGMENT (symbolP, segment);
3482 S_SET_VALUE (symbolP, valu);
3483 symbol_clear_list_pointers (symbolP);
3484
3485 symbol_set_frag (symbolP, frag);
3486
3487 /* Link to end of symbol chain. */
3488 {
3489 extern int symbol_table_frozen;
3490
3491 if (symbol_table_frozen)
3492 abort ();
3493 }
3494
3495 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
3496
3497 obj_symbol_new_hook (symbolP);
3498
3499 #ifdef tc_symbol_new_hook
3500 tc_symbol_new_hook (symbolP);
3501 #endif
3502
3503 #ifdef DEBUG_SYMS
3504 verify_symbol_chain (symbol_rootP, symbol_lastP);
3505 #endif /* DEBUG_SYMS */
3506 }
3507
/* Handle the .ltorg directive: dump the current literal pool at this
   point, define its symbol, and mark it empty so a fresh pool starts
   afterwards.  Does nothing if there is no pool or it is empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* The pool is data, not instructions: emit a $d mapping symbol.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* \002 in the name keeps it out of the way of user symbols.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Bind the pool symbol (created earlier with no location) to here.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3559
3560 #ifdef OBJ_ELF
3561 /* Forward declarations for functions below, in the MD interface
3562 section. */
3563 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3564 static valueT create_unwind_entry (int);
3565 static void start_unwind_section (const segT, int);
3566 static void add_unwind_opcode (valueT, int);
3567 static void flush_pending_unwind (void);
3568
3569 /* Directives: Data. */
3570
/* Handle .word/.short style data directives (NBYTES is the item
   size).  Like the generic cons, but additionally accepts a
   relocation suffix such as (GOT) after a symbol, in which case the
   value is emitted with the requested relocation.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No suffix present: plain data.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		bfd_reloc_type_lookup (stdoutput,
				       (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  /* The trick: save the text, shift the already-parsed
		     part over the reloc suffix so "sym(GOT)+4" becomes
		     re-parseable as "sym+4", re-run expression (), then
		     restore the original buffer contents.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Place the reloc at the least-significant end of the
		     field when it is narrower than the data item.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3665
3666 /* Emit an expression containing a 32-bit thumb instruction.
3667 Implementation based on put_thumb32_insn. */
3668
3669 static void
3670 emit_thumb32_expr (expressionS * exp)
3671 {
3672 expressionS exp_high = *exp;
3673
3674 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3675 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3676 exp->X_add_number &= 0xffff;
3677 emit_expr (exp, (unsigned int) THUMB_SIZE);
3678 }
3679
/* Guess the instruction size based on the opcode.  16-bit Thumb
   encodings have their top halfword below 0xe800; 32-bit encodings
   occupy the full word with the first halfword >= 0xe800.  Returns
   2, 4, or 0 when the width cannot be determined.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int uop = (unsigned int) opcode;

  if (uop < 0xe800u)
    return 2;
  if (uop >= 0xe8000000u)
    return 4;
  return 0;
}
3692
/* Emit one .inst operand EXP of width NBYTES (0 = deduce from the
   opcode in Thumb mode).  Updates the IT-block state machine around
   the emission.  Returns TRUE if something was emitted.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* .inst with no width suffix: guess from the opcode value.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT-block tracking consistent: a hand-emitted
		 instruction closes an automatic IT block, otherwise it
		 is treated as IT-neutral.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* 32-bit Thumb insns are stored as two halfwords on
		 little-endian targets.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3737
3738 /* Like s_arm_elf_cons but do not use md_cons_align and
3739 set the mapping state to MAP_ARM/MAP_THUMB. */
3740
3741 static void
3742 s_arm_elf_inst (int nbytes)
3743 {
3744 if (is_it_end_of_statement ())
3745 {
3746 demand_empty_rest_of_line ();
3747 return;
3748 }
3749
3750 /* Calling mapping_state () here will not change ARM/THUMB,
3751 but will ensure not to be in DATA state. */
3752
3753 if (thumb_mode)
3754 mapping_state (MAP_THUMB);
3755 else
3756 {
3757 if (nbytes != 0)
3758 {
3759 as_bad (_("width suffixes are invalid in ARM mode"));
3760 ignore_rest_of_line ();
3761 return;
3762 }
3763
3764 nbytes = 4;
3765
3766 mapping_state (MAP_ARM);
3767 }
3768
3769 do
3770 {
3771 expressionS exp;
3772
3773 expression (& exp);
3774
3775 if (! emit_insn (& exp, nbytes))
3776 {
3777 ignore_rest_of_line ();
3778 return;
3779 }
3780 }
3781 while (*input_line_pointer++ == ',');
3782
3783 /* Put terminator back into stream. */
3784 input_line_pointer --;
3785 demand_empty_rest_of_line ();
3786 }
3787
3788 /* Parse a .rel31 directive. */
3789
3790 static void
3791 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3792 {
3793 expressionS exp;
3794 char *p;
3795 valueT highbit;
3796
3797 highbit = 0;
3798 if (*input_line_pointer == '1')
3799 highbit = 0x80000000;
3800 else if (*input_line_pointer != '0')
3801 as_bad (_("expected 0 or 1"));
3802
3803 input_line_pointer++;
3804 if (*input_line_pointer != ',')
3805 as_bad (_("missing comma"));
3806 input_line_pointer++;
3807
3808 #ifdef md_flush_pending_output
3809 md_flush_pending_output ();
3810 #endif
3811
3812 #ifdef md_cons_align
3813 md_cons_align (4);
3814 #endif
3815
3816 mapping_state (MAP_DATA);
3817
3818 expression (&exp);
3819
3820 p = frag_more (4);
3821 md_number_to_chars (p, highbit, 4);
3822 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3823 BFD_RELOC_ARM_PREL31);
3824
3825 demand_empty_rest_of_line ();
3826 }
3827
3828 /* Directives: AEABI stack-unwind tables. */
3829
3830 /* Parse an unwind_fnstart directive. Simply records the current location. */
3831
3832 static void
3833 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3834 {
3835 demand_empty_rest_of_line ();
3836 if (unwind.proc_start)
3837 {
3838 as_bad (_("duplicate .fnstart directive"));
3839 return;
3840 }
3841
3842 /* Mark the start of the function. */
3843 unwind.proc_start = expr_build_dot ();
3844
3845 /* Reset the rest of the unwind info. */
3846 unwind.opcode_count = 0;
3847 unwind.table_entry = NULL;
3848 unwind.personality_routine = NULL;
3849 unwind.personality_index = -1;
3850 unwind.frame_size = 0;
3851 unwind.fp_offset = 0;
3852 unwind.fp_reg = REG_SP;
3853 unwind.fp_used = 0;
3854 unwind.sp_restored = 0;
3855 }
3856
3857
3858 /* Parse a handlerdata directive. Creates the exception handling table entry
3859 for the function. */
3860
3861 static void
3862 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3863 {
3864 demand_empty_rest_of_line ();
3865 if (!unwind.proc_start)
3866 as_bad (MISSING_FNSTART);
3867
3868 if (unwind.table_entry)
3869 as_bad (_("duplicate .handlerdata directive"));
3870
3871 create_unwind_entry (1);
3872 }
3873
/* Parse an unwind_fnend directive.  Generates the index table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  VAL is nonzero only when the unwind data is
     small enough to be inlined in the index table itself.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      /* A zero-size R_ARM_NONE fix records the reference without
	 emitting any data.  */
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3943
3944
3945 /* Parse an unwind_cantunwind directive. */
3946
3947 static void
3948 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3949 {
3950 demand_empty_rest_of_line ();
3951 if (!unwind.proc_start)
3952 as_bad (MISSING_FNSTART);
3953
3954 if (unwind.personality_routine || unwind.personality_index != -1)
3955 as_bad (_("personality routine specified for cantunwind frame"));
3956
3957 unwind.personality_index = -2;
3958 }
3959
3960
3961 /* Parse a personalityindex directive. */
3962
3963 static void
3964 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3965 {
3966 expressionS exp;
3967
3968 if (!unwind.proc_start)
3969 as_bad (MISSING_FNSTART);
3970
3971 if (unwind.personality_routine || unwind.personality_index != -1)
3972 as_bad (_("duplicate .personalityindex directive"));
3973
3974 expression (&exp);
3975
3976 if (exp.X_op != O_constant
3977 || exp.X_add_number < 0 || exp.X_add_number > 15)
3978 {
3979 as_bad (_("bad personality routine number"));
3980 ignore_rest_of_line ();
3981 return;
3982 }
3983
3984 unwind.personality_index = exp.X_add_number;
3985
3986 demand_empty_rest_of_line ();
3987 }
3988
3989
3990 /* Parse a personality directive. */
3991
3992 static void
3993 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3994 {
3995 char *name, *p, c;
3996
3997 if (!unwind.proc_start)
3998 as_bad (MISSING_FNSTART);
3999
4000 if (unwind.personality_routine || unwind.personality_index != -1)
4001 as_bad (_("duplicate .personality directive"));
4002
4003 c = get_symbol_name (& name);
4004 p = input_line_pointer;
4005 if (c == '"')
4006 ++ input_line_pointer;
4007 unwind.personality_routine = symbol_find_or_make (name);
4008 *p = c;
4009 demand_empty_rest_of_line ();
4010 }
4011
4012
/* Parse a directive saving core registers.  Reads a core register
   list and emits the matching EHABI pop opcodes (short form for a
   contiguous r4-based block plus optional r14, long form otherwise),
   updating the tracked frame size.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer, REGLIST_RN);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the movsp opcode and substitute sp (bit 13) for ip (bit 12)
	 in the list.  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4088
4089
4090 /* Parse a directive saving FPA registers. */
4091
4092 static void
4093 s_arm_unwind_save_fpa (int reg)
4094 {
4095 expressionS exp;
4096 int num_regs;
4097 valueT op;
4098
4099 /* Get Number of registers to transfer. */
4100 if (skip_past_comma (&input_line_pointer) != FAIL)
4101 expression (&exp);
4102 else
4103 exp.X_op = O_illegal;
4104
4105 if (exp.X_op != O_constant)
4106 {
4107 as_bad (_("expected , <constant>"));
4108 ignore_rest_of_line ();
4109 return;
4110 }
4111
4112 num_regs = exp.X_add_number;
4113
4114 if (num_regs < 1 || num_regs > 4)
4115 {
4116 as_bad (_("number of registers must be in the range [1:4]"));
4117 ignore_rest_of_line ();
4118 return;
4119 }
4120
4121 demand_empty_rest_of_line ();
4122
4123 if (reg == 4)
4124 {
4125 /* Short form. */
4126 op = 0xb4 | (num_regs - 1);
4127 add_unwind_opcode (op, 1);
4128 }
4129 else
4130 {
4131 /* Long form. */
4132 op = 0xc800 | (reg << 4) | (num_regs - 1);
4133 add_unwind_opcode (op, 2);
4134 }
4135 unwind.frame_size += num_regs * 12;
4136 }
4137
4138
/* Parse a directive saving VFP registers for ARMv6 and above.  The
   register list may span D0-D31; registers D16+ need a separate
   (VFPv3) opcode, so the list is split at D16 when necessary.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* Opcode encodes the first saved register relative to D16.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies 8 bytes on the stack.  */
  unwind.frame_size += count * 8;
}
4187
4188
4189 /* Parse a directive saving VFP registers for pre-ARMv6. */
4190
4191 static void
4192 s_arm_unwind_save_vfp (void)
4193 {
4194 int count;
4195 unsigned int reg;
4196 valueT op;
4197
4198 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
4199 if (count == FAIL)
4200 {
4201 as_bad (_("expected register list"));
4202 ignore_rest_of_line ();
4203 return;
4204 }
4205
4206 demand_empty_rest_of_line ();
4207
4208 if (reg == 8)
4209 {
4210 /* Short form. */
4211 op = 0xb8 | (count - 1);
4212 add_unwind_opcode (op, 1);
4213 }
4214 else
4215 {
4216 /* Long form. */
4217 op = 0xb300 | (reg << 4) | (count - 1);
4218 add_unwind_opcode (op, 2);
4219 }
4220 unwind.frame_size += count * 8 + 4;
4221 }
4222
4223
/* Parse a directive saving iWMMXt data registers.  Parses a wR
   register list (braces optional), optionally merges it with the
   immediately preceding save opcode(s), then emits save opcodes for
   each contiguous block in descending register order.  */

static void
s_arm_unwind_save_mmxwr (void)
{
  int reg;
  int hi_reg;
  int i;
  unsigned mask = 0;
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	  goto error;
	}

      /* mask >> reg is nonzero iff some register >= reg is already set.  */
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  /* Range of the form wRa-wRb.  */
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.	*/
  flush_pending_unwind ();

  for (i = 0; i < 16; i++)
    {
      if (mask & (1 << i))
	unwind.frame_size += 8;
    }

  /* Attempt to combine with a previous opcode.	 We do this because gcc
     likes to output separate unwind directives for a single block of
     registers.	 */
  if (unwind.opcode_count > 0)
    {
      i = unwind.opcodes[unwind.opcode_count - 1];
      if ((i & 0xf8) == 0xc0)
	{
	  i &= 7;
	  /* Only merge if the blocks are contiguous.  */
	  if (i < 6)
	    {
	      /* Previous opcode was a short-form save of wR10..wR(10+i);
		 merge if this list is exactly {wR9}.  */
	      if ((mask & 0xfe00) == (1 << 9))
		{
		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
		  unwind.opcode_count--;
		}
	    }
	  else if (i == 6 && unwind.opcode_count >= 2)
	    {
	      /* Previous opcode was long-form (two bytes): recover its
		 start register and count, and merge when this list ends
		 exactly where the previous block started.  */
	      i = unwind.opcodes[unwind.opcode_count - 2];
	      reg = i >> 4;
	      i &= 0xf;

	      op = 0xffff << (reg - 1);
	      if (reg > 0
		  && ((mask & op) == (1u << (reg - 1))))
		{
		  op = (1 << (reg + i + 1)) - 1;
		  op &= ~((1 << reg) - 1);
		  mask |= op;
		  unwind.opcode_count -= 2;
		}
	    }
	}
    }

  hi_reg = 15;
  /* We want to generate opcodes in the order the registers have been
     saved, ie. descending order.  */
  for (reg = 15; reg >= -1; reg--)
    {
      /* Save registers in blocks.  */
      if (reg < 0
	  || !(mask & (1 << reg)))
	{
	  /* We found an unsaved reg.  Generate opcodes to save the
	     preceding block.	*/
	  if (reg != hi_reg)
	    {
	      if (reg == 9)
		{
		  /* Short form.  */
		  op = 0xc0 | (hi_reg - 10);
		  add_unwind_opcode (op, 1);
		}
	      else
		{
		  /* Long form.	 */
		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
		  add_unwind_opcode (op, 2);
		}
	    }
	  hi_reg = reg - 1;
	}
    }

  return;
error:
  ignore_rest_of_line ();
}
4357
/* Parse a directive saving iWMMXt control registers (wCGR0-wCGR3,
   register numbers 8-11; stored in the mask rebased to bit 0).  */

static void
s_arm_unwind_save_mmxwcg (void)
{
  int reg;
  int hi_reg;
  unsigned mask = 0;
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  skip_whitespace (input_line_pointer);

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	  goto error;
	}

      /* Rebase wCGR numbering so wCGR0 is bit 0 of the mask.  */
      reg -= 8;
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  /* Range of the form wCGRa-wCGRb.  Note: HI_REG is not
	     rebased, so the range check and fill use raw numbering.  */
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.	*/
  flush_pending_unwind ();

  for (reg = 0; reg < 16; reg++)
    {
      if (mask & (1 << reg))
	unwind.frame_size += 4;
    }
  op = 0xc700 | mask;
  add_unwind_opcode (op, 2);
  return;
error:
  ignore_rest_of_line ();
}
4425
4426
4427 /* Parse an unwind_save directive.
4428 If the argument is non-zero, this is a .vsave directive. */
4429
4430 static void
4431 s_arm_unwind_save (int arch_v6)
4432 {
4433 char *peek;
4434 struct reg_entry *reg;
4435 bfd_boolean had_brace = FALSE;
4436
4437 if (!unwind.proc_start)
4438 as_bad (MISSING_FNSTART);
4439
4440 /* Figure out what sort of save we have. */
4441 peek = input_line_pointer;
4442
4443 if (*peek == '{')
4444 {
4445 had_brace = TRUE;
4446 peek++;
4447 }
4448
4449 reg = arm_reg_parse_multi (&peek);
4450
4451 if (!reg)
4452 {
4453 as_bad (_("register expected"));
4454 ignore_rest_of_line ();
4455 return;
4456 }
4457
4458 switch (reg->type)
4459 {
4460 case REG_TYPE_FN:
4461 if (had_brace)
4462 {
4463 as_bad (_("FPA .unwind_save does not take a register list"));
4464 ignore_rest_of_line ();
4465 return;
4466 }
4467 input_line_pointer = peek;
4468 s_arm_unwind_save_fpa (reg->number);
4469 return;
4470
4471 case REG_TYPE_RN:
4472 s_arm_unwind_save_core ();
4473 return;
4474
4475 case REG_TYPE_VFD:
4476 if (arch_v6)
4477 s_arm_unwind_save_vfp_armv6 ();
4478 else
4479 s_arm_unwind_save_vfp ();
4480 return;
4481
4482 case REG_TYPE_MMXWR:
4483 s_arm_unwind_save_mmxwr ();
4484 return;
4485
4486 case REG_TYPE_MMXWCG:
4487 s_arm_unwind_save_mmxwcg ();
4488 return;
4489
4490 default:
4491 as_bad (_(".unwind_save does not support this kind of register"));
4492 ignore_rest_of_line ();
4493 }
4494 }
4495
4496
4497 /* Parse an unwind_movsp directive. */
4498
4499 static void
4500 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4501 {
4502 int reg;
4503 valueT op;
4504 int offset;
4505
4506 if (!unwind.proc_start)
4507 as_bad (MISSING_FNSTART);
4508
4509 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4510 if (reg == FAIL)
4511 {
4512 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4513 ignore_rest_of_line ();
4514 return;
4515 }
4516
4517 /* Optional constant. */
4518 if (skip_past_comma (&input_line_pointer) != FAIL)
4519 {
4520 if (immediate_for_directive (&offset) == FAIL)
4521 return;
4522 }
4523 else
4524 offset = 0;
4525
4526 demand_empty_rest_of_line ();
4527
4528 if (reg == REG_SP || reg == REG_PC)
4529 {
4530 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4531 return;
4532 }
4533
4534 if (unwind.fp_reg != REG_SP)
4535 as_bad (_("unexpected .unwind_movsp directive"));
4536
4537 /* Generate opcode to restore the value. */
4538 op = 0x90 | reg;
4539 add_unwind_opcode (op, 1);
4540
4541 /* Record the information for later. */
4542 unwind.fp_reg = reg;
4543 unwind.fp_offset = unwind.frame_size - offset;
4544 unwind.sp_restored = 1;
4545 }
4546
4547 /* Parse an unwind_pad directive. */
4548
4549 static void
4550 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4551 {
4552 int offset;
4553
4554 if (!unwind.proc_start)
4555 as_bad (MISSING_FNSTART);
4556
4557 if (immediate_for_directive (&offset) == FAIL)
4558 return;
4559
4560 if (offset & 3)
4561 {
4562 as_bad (_("stack increment must be multiple of 4"));
4563 ignore_rest_of_line ();
4564 return;
4565 }
4566
4567 /* Don't generate any opcodes, just record the details for later. */
4568 unwind.frame_size += offset;
4569 unwind.pending_offset += offset;
4570
4571 demand_empty_rest_of_line ();
4572 }
4573
4574 /* Parse an unwind_setfp directive. */
4575
4576 static void
4577 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4578 {
4579 int sp_reg;
4580 int fp_reg;
4581 int offset;
4582
4583 if (!unwind.proc_start)
4584 as_bad (MISSING_FNSTART);
4585
4586 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4587 if (skip_past_comma (&input_line_pointer) == FAIL)
4588 sp_reg = FAIL;
4589 else
4590 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4591
4592 if (fp_reg == FAIL || sp_reg == FAIL)
4593 {
4594 as_bad (_("expected <reg>, <reg>"));
4595 ignore_rest_of_line ();
4596 return;
4597 }
4598
4599 /* Optional constant. */
4600 if (skip_past_comma (&input_line_pointer) != FAIL)
4601 {
4602 if (immediate_for_directive (&offset) == FAIL)
4603 return;
4604 }
4605 else
4606 offset = 0;
4607
4608 demand_empty_rest_of_line ();
4609
4610 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4611 {
4612 as_bad (_("register must be either sp or set by a previous"
4613 "unwind_movsp directive"));
4614 return;
4615 }
4616
4617 /* Don't generate any opcodes, just record the information for later. */
4618 unwind.fp_reg = fp_reg;
4619 unwind.fp_used = 1;
4620 if (sp_reg == REG_SP)
4621 unwind.fp_offset = unwind.frame_size - offset;
4622 else
4623 unwind.fp_offset -= offset;
4624 }
4625
4626 /* Parse an unwind_raw directive. */
4627
4628 static void
4629 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4630 {
4631 expressionS exp;
4632 /* This is an arbitrary limit. */
4633 unsigned char op[16];
4634 int count;
4635
4636 if (!unwind.proc_start)
4637 as_bad (MISSING_FNSTART);
4638
4639 expression (&exp);
4640 if (exp.X_op == O_constant
4641 && skip_past_comma (&input_line_pointer) != FAIL)
4642 {
4643 unwind.frame_size += exp.X_add_number;
4644 expression (&exp);
4645 }
4646 else
4647 exp.X_op = O_illegal;
4648
4649 if (exp.X_op != O_constant)
4650 {
4651 as_bad (_("expected <offset>, <opcode>"));
4652 ignore_rest_of_line ();
4653 return;
4654 }
4655
4656 count = 0;
4657
4658 /* Parse the opcode. */
4659 for (;;)
4660 {
4661 if (count >= 16)
4662 {
4663 as_bad (_("unwind opcode too long"));
4664 ignore_rest_of_line ();
4665 }
4666 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4667 {
4668 as_bad (_("invalid unwind opcode"));
4669 ignore_rest_of_line ();
4670 return;
4671 }
4672 op[count++] = exp.X_add_number;
4673
4674 /* Parse the next byte. */
4675 if (skip_past_comma (&input_line_pointer) == FAIL)
4676 break;
4677
4678 expression (&exp);
4679 }
4680
4681 /* Add the opcode bytes in reverse order. */
4682 while (count--)
4683 add_unwind_opcode (op[count], 1);
4684
4685 demand_empty_rest_of_line ();
4686 }
4687
4688
4689 /* Parse a .eabi_attribute directive. */
4690
4691 static void
4692 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4693 {
4694 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4695
4696 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4697 attributes_set_explicitly[tag] = 1;
4698 }
4699
/* Parse a .tlsdescseq directive: attach a TLS descriptor-sequence fix
   (relocation) for the given symbol at the current output position.
   The Thumb or ARM flavour of the relocation is chosen from the current
   assembly mode.  No bytes are emitted by the directive itself.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* P is the current end of the frag's data; the fix is placed there.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4723 #endif /* OBJ_ELF */
4724
4725 static void s_arm_arch (int);
4726 static void s_arm_object_arch (int);
4727 static void s_arm_cpu (int);
4728 static void s_arm_fpu (int);
4729 static void s_arm_arch_extension (int);
4730
4731 #ifdef TE_PE
4732
/* Parse a .secrel32 directive (TE_PE only): one or more comma-separated
   expressions, each emitted as a 32-bit section-relative value.  */

static void
pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;	/* Mark for section-relative treatment.  */

      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Step back over the non-comma character that terminated the loop.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
4751 #endif /* TE_PE */
4752
4753 /* This table describes all the machine specific pseudo-ops the assembler
4754 has to support. The fields are:
4755 pseudo-op name without dot
4756 function to call to execute this pseudo-op
4757 Integer arg to pass to the function. */
4758
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.	 */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,	  0 },
  { "qn",	   s_qn,	  0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align_ptwo,  2 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func", s_thumb_func, 0 },
  { "thumb_set",	s_thumb_set, 0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  /* ELF-only directives: data with mapping symbols, EHABI unwind table
     construction, EABI attributes and TLS sequences.  */
  { "word",	     s_arm_elf_cons, 4 },
  { "long",	     s_arm_elf_cons, 4 },
  { "inst.n",	     s_arm_elf_inst, 2 },
  { "inst.w",	     s_arm_elf_inst, 4 },
  { "inst",	     s_arm_elf_inst, 0 },
  { "rel31",	     s_arm_rel31,    0 },
  { "fnstart",	     s_arm_unwind_fnstart,	0 },
  { "fnend",	     s_arm_unwind_fnend,	0 },
  { "cantunwind",    s_arm_unwind_cantunwind,	0 },
  { "personality",   s_arm_unwind_personality,	0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata",   s_arm_unwind_handlerdata,	0 },
  { "save",	     s_arm_unwind_save,		0 },
  { "vsave",	     s_arm_unwind_save,		1 },
  { "movsp",	     s_arm_unwind_movsp,	0 },
  { "pad",	     s_arm_unwind_pad,		0 },
  { "setfp",	     s_arm_unwind_setfp,	0 },
  { "unwind_raw",    s_arm_unwind_raw,		0 },
  { "eabi_attribute", s_arm_eabi_attribute,	0 },
  { "tlsdescseq",    s_arm_tls_descseq,		0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  /* Table terminator.  */
  { 0, 0, 0 }
};
4832 \f
4833 /* Parser functions used exclusively in instruction operands. */
4834
4835 /* Generic immediate-value read function for use in insn parsing.
4836 STR points to the beginning of the immediate (the leading #);
4837 VAL receives the value; if the value is outside [MIN, MAX]
4838 issue an error. PREFIX_OPT is true if the immediate prefix is
4839 optional. */
4840
4841 static int
4842 parse_immediate (char **str, int *val, int min, int max,
4843 bfd_boolean prefix_opt)
4844 {
4845 expressionS exp;
4846
4847 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4848 if (exp.X_op != O_constant)
4849 {
4850 inst.error = _("constant expression required");
4851 return FAIL;
4852 }
4853
4854 if (exp.X_add_number < min || exp.X_add_number > max)
4855 {
4856 inst.error = _("immediate value out of range");
4857 return FAIL;
4858 }
4859
4860 *val = exp.X_add_number;
4861 return SUCCESS;
4862 }
4863
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low 32
   bits in .imm and, for a 64-bit value, the high 32 bits in .reg with
   .regisimm set.

   STR points at the immediate text and is advanced past it on success.
   IN_EXP, if non-NULL, receives the parsed expression (otherwise a local
   buffer is used).  ALLOW_SYMBOL_P permits a bare symbol to be accepted.
   Returns SUCCESS or FAIL.  */

static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Pack the low 32 bits into .imm and the next 32 into .reg,
	 littlenum by littlenum.  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4936
/* Returns the pseudo-register number of an FPA immediate constant
   (i + 8 for the i'th entry of the fp_values table),
   or FAIL if there isn't a valid constant here.  STR is advanced past
   the constant on success.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Trailing garbage: undo the advance and keep trying.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA value.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* Restore the global parse pointer before reporting failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
5029
/* Returns 1 if IMM has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000: the low 19 bits must be zero,
   and bits 30:25 must be 0b100000 when bit 29 is clear, or 0b011111
   when bit 29 is set.  */

static int
is_quarter_float (unsigned imm)
{
  unsigned expected = (imm & 0x20000000) ? 0x3e000000u : 0x40000000u;

  if ((imm & 0x7ffff) != 0)
    return 0;

  return (imm & 0x7e000000) == expected;
}
5039
5040
/* Detect the presence of a floating point or integer zero constant,
   i.e. #0.0 or #0.

   IN points at the operand text and is advanced over what was consumed.
   Returns TRUE if a zero constant was recognised, FALSE otherwise.  */

static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    {
      /* In unified syntax, all prefixes are optional.  */
      if (!unified_syntax)
	return FALSE;
    }
  else
    ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      int val;
      /* Range [0, 0]: only the value zero is accepted.  */
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
	return FALSE;
      return TRUE;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
			     &generic_floating_point_number);

  /* NOTE(review): low > leader appears to indicate that atof_generic
     produced no significant littlenums, i.e. the parsed value is a
     positive zero -- confirm against atof_generic's representation.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
	  > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
5078
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.

   CCP points at the operand text and is advanced on success; IMMED
   receives the 32-bit single-precision bit pattern.  Returns SUCCESS
   or FAIL.  */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan up to the end of the token looking for a float marker.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept a proper quarter-precision pattern, or +/-0.0 (the
	 0x7fffffff mask ignores the sign bit).  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5142
/* Shift operands.  RRX is encoded as ROR #0 and takes no amount.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a textual shift name (e.g. "lsl", "asl") to its kind; looked up
   via the arm_shift_hsh hash table in parse_shift.  */
struct asm_shift_name
{
  const char	  *name;
  enum shift_kind  kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
};
5164
/* Parse a <shift> specifier on an ARM data processing instruction.
   This has three forms:

   (LSL|LSR|ASL|ASR|ROR) Rs
   (LSL|LSR|ASL|ASR|ROR) #imm
   RRX

   Note that ASL is assimilated to LSL in the instruction encoding, and
   RRX to ROR #0 (which cannot be written as such).

   STR is advanced past the shift on success.  I indexes the
   inst.operands[] slot to fill in; MODE restricts which shifts are
   acceptable.  Returns SUCCESS or FAIL (setting inst.error).  */

static int
parse_shift (char **str, int i, enum parse_shift_mode mode)
{
  const struct asm_shift_name *shift_name;
  enum shift_kind shift;
  char *s = *str;
  char *p = s;
  int reg;

  /* Scan past the alphabetic shift mnemonic.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
							    p - *str);

  if (shift_name == NULL)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift = shift_name->kind;

  /* Enforce the caller's restriction on which shifts are allowed.  */
  switch (mode)
    {
    case NO_SHIFT_RESTRICT:
    case SHIFT_IMMEDIATE:   break;

    case SHIFT_LSL_OR_ASR_IMMEDIATE:
      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
	{
	  inst.error = _("'LSL' or 'ASR' required");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_IMMEDIATE:
      if (shift != SHIFT_LSL)
	{
	  inst.error = _("'LSL' required");
	  return FAIL;
	}
      break;

    case SHIFT_ASR_IMMEDIATE:
      if (shift != SHIFT_ASR)
	{
	  inst.error = _("'ASR' required");
	  return FAIL;
	}
      break;

    default: abort ();
    }

  /* RRX takes no amount; everything else needs a register or immediate.  */
  if (shift != SHIFT_RRX)
    {
      /* Whitespace can appear here if the next thing is a bare digit.	*/
      skip_whitespace (p);

      if (mode == NO_SHIFT_RESTRICT
	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;
	}
      else if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
	return FAIL;
    }
  inst.operands[i].shift_kind = shift;
  inst.operands[i].shifted = 1;
  *str = p;
  return SUCCESS;
}
5255
/* Parse a <shifter_operand> for an ARM data processing instruction:

      #<immediate>
      #<immediate>, <rotate>
      <Rm>
      <Rm>, <shift>

   where <shift> is defined by parse_shift above, and <rotate> is a
   multiple of 2 between 0 and 30.  Validation of immediate operands
   is deferred to md_apply_fix.  Returns SUCCESS or FAIL.  */

static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  if (my_get_expression (&inst.relocs[0].exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.relocs[0].exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.relocs[0].exp.X_add_number < 0
	  || inst.relocs[0].exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  Since VALUE is even, VALUE << 7 is
	 equivalent to (VALUE / 2) << 8, i.e. the rotation amount
	 halved and placed above the 8-bit immediate.  */
      inst.operands[i].imm = inst.relocs[0].exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* Plain #<immediate>: leave encoding/validation to md_apply_fix.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 0;
  return SUCCESS;
}
5326
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  A code of 0 means the relocation is not
   permitted for that instruction class.  */

struct group_reloc_table_entry
{
  const char *name;	/* Relocation name as written in source, sans colon.  */
  int alu_code;		/* BFD_RELOC_* for ADD/SUB, or 0.  */
  int ldr_code;		/* BFD_RELOC_* for LDR, or 0.  */
  int ldrs_code;	/* BFD_RELOC_* for LDRS, or 0.  */
  int ldc_code;		/* BFD_RELOC_* for LDC, or 0.  */
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
5351
/* Note: a zero code below means that relocation variety is not
   permitted for the corresponding instruction class.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5426
5427 /* Given the address of a pointer pointing to the textual name of a group
5428 relocation as may appear in assembler source, attempt to find its details
5429 in group_reloc_table. The pointer will be updated to the character after
5430 the trailing colon. On failure, FAIL will be returned; SUCCESS
5431 otherwise. On success, *entry will be updated to point at the relevant
5432 group_reloc_table entry. */
5433
5434 static int
5435 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5436 {
5437 unsigned int i;
5438 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5439 {
5440 int length = strlen (group_reloc_table[i].name);
5441
5442 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5443 && (*str)[length] == ':')
5444 {
5445 *out = &group_reloc_table[i];
5446 *str += (length + 1);
5447 return SUCCESS;
5448 }
5449 }
5450
5451 return FAIL;
5452 }
5453
/* Parse a <shifter_operand> for an ARM data processing instruction
   (as for parse_shifter_operand) where group relocations are allowed:

      #<immediate>
      #<immediate>, <rotate>
      #:<group_reloc>:<expression>
      <Rm>
      <Rm>, <shift>

   where <group_reloc> is one of the strings defined in group_reloc_table.
   The hashes are optional.

   Everything else is as for parse_shifter_operand.  Returns a
   parse_operand_result; on success inst.relocs[0] carries the ALU-variant
   relocation type.  */

static parse_operand_result
parse_shifter_operand_group_reloc (char **str, int i)
{
  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a group relocation.
     If we don't, punt the whole lot to parse_shifter_operand.	*/

  if (((*str)[0] == '#' && (*str)[1] == ':')
      || (*str)[0] == ':')
    {
      struct group_reloc_table_entry *entry;

      /* Skip over the #: or : sequence.  */
      if ((*str)[0] == '#')
	(*str) += 2;
      else
	(*str)++;

      /* Try to parse a group relocation.  Anything else is an error.  */
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
	{
	  inst.error = _("unknown group relocation");
	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
	}

      /* We now have the group relocation table entry corresponding to
	 the name in the assembler source.  Next, we parse the expression.  */
      if (my_get_expression (&inst.relocs[0].exp, str, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL_NO_BACKTRACK;

      /* Record the relocation type (always the ALU variant here).  */
      inst.relocs[0].type = (bfd_reloc_code_real_type) entry->alu_code;
      gas_assert (inst.relocs[0].type != 0);

      return PARSE_OPERAND_SUCCESS;
    }
  else
    return parse_shifter_operand (str, i) == SUCCESS
	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;

  /* Never reached.  */
}
5509
5510 /* Parse a Neon alignment expression. Information is written to
5511 inst.operands[i]. We assume the initial ':' has been skipped.
5512
5513 align .imm = align << 8, .immisalign=1, .preind=0 */
5514 static parse_operand_result
5515 parse_neon_alignment (char **str, int i)
5516 {
5517 char *p = *str;
5518 expressionS exp;
5519
5520 my_get_expression (&exp, &p, GE_NO_PREFIX);
5521
5522 if (exp.X_op != O_constant)
5523 {
5524 inst.error = _("alignment must be constant");
5525 return PARSE_OPERAND_FAIL;
5526 }
5527
5528 inst.operands[i].imm = exp.X_add_number << 8;
5529 inst.operands[i].immisalign = 1;
5530 /* Alignments are not pre-indexes. */
5531 inst.operands[i].preind = 0;
5532
5533 *str = p;
5534 return PARSE_OPERAND_SUCCESS;
5535 }
5536
5537 /* Parse all forms of an ARM address expression. Information is written
5538 to inst.operands[i] and/or inst.relocs[0].
5539
5540 Preindexed addressing (.preind=1):
5541
5542 [Rn, #offset] .reg=Rn .relocs[0].exp=offset
5543 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5544 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5545 .shift_kind=shift .relocs[0].exp=shift_imm
5546
5547 These three may have a trailing ! which causes .writeback to be set also.
5548
5549 Postindexed addressing (.postind=1, .writeback=1):
5550
5551 [Rn], #offset .reg=Rn .relocs[0].exp=offset
5552 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5553 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5554 .shift_kind=shift .relocs[0].exp=shift_imm
5555
5556 Unindexed addressing (.preind=0, .postind=0):
5557
5558 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5559
5560 Other:
5561
5562 [Rn]{!} shorthand for [Rn,#0]{!}
5563 =immediate .isreg=0 .relocs[0].exp=immediate
5564 label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5565
5566 It is the caller's responsibility to check for addressing modes not
5567 supported by the instruction, and to set inst.relocs[0].type. */
5568
5569 static parse_operand_result
5570 parse_address_main (char **str, int i, int group_relocations,
5571 group_reloc_type group_type)
5572 {
5573 char *p = *str;
5574 int reg;
5575
5576 if (skip_past_char (&p, '[') == FAIL)
5577 {
5578 if (skip_past_char (&p, '=') == FAIL)
5579 {
5580 /* Bare address - translate to PC-relative offset. */
5581 inst.relocs[0].pc_rel = 1;
5582 inst.operands[i].reg = REG_PC;
5583 inst.operands[i].isreg = 1;
5584 inst.operands[i].preind = 1;
5585
5586 if (my_get_expression (&inst.relocs[0].exp, &p, GE_OPT_PREFIX_BIG))
5587 return PARSE_OPERAND_FAIL;
5588 }
5589 else if (parse_big_immediate (&p, i, &inst.relocs[0].exp,
5590 /*allow_symbol_p=*/TRUE))
5591 return PARSE_OPERAND_FAIL;
5592
5593 *str = p;
5594 return PARSE_OPERAND_SUCCESS;
5595 }
5596
5597 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5598 skip_whitespace (p);
5599
5600 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5601 {
5602 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5603 return PARSE_OPERAND_FAIL;
5604 }
5605 inst.operands[i].reg = reg;
5606 inst.operands[i].isreg = 1;
5607
5608 if (skip_past_comma (&p) == SUCCESS)
5609 {
5610 inst.operands[i].preind = 1;
5611
5612 if (*p == '+') p++;
5613 else if (*p == '-') p++, inst.operands[i].negative = 1;
5614
5615 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5616 {
5617 inst.operands[i].imm = reg;
5618 inst.operands[i].immisreg = 1;
5619
5620 if (skip_past_comma (&p) == SUCCESS)
5621 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5622 return PARSE_OPERAND_FAIL;
5623 }
5624 else if (skip_past_char (&p, ':') == SUCCESS)
5625 {
5626 /* FIXME: '@' should be used here, but it's filtered out by generic
5627 code before we get to see it here. This may be subject to
5628 change. */
5629 parse_operand_result result = parse_neon_alignment (&p, i);
5630
5631 if (result != PARSE_OPERAND_SUCCESS)
5632 return result;
5633 }
5634 else
5635 {
5636 if (inst.operands[i].negative)
5637 {
5638 inst.operands[i].negative = 0;
5639 p--;
5640 }
5641
5642 if (group_relocations
5643 && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
5644 {
5645 struct group_reloc_table_entry *entry;
5646
5647 /* Skip over the #: or : sequence. */
5648 if (*p == '#')
5649 p += 2;
5650 else
5651 p++;
5652
5653 /* Try to parse a group relocation. Anything else is an
5654 error. */
5655 if (find_group_reloc_table_entry (&p, &entry) == FAIL)
5656 {
5657 inst.error = _("unknown group relocation");
5658 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5659 }
5660
5661 /* We now have the group relocation table entry corresponding to
5662 the name in the assembler source. Next, we parse the
5663 expression. */
5664 if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
5665 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5666
5667 /* Record the relocation type. */
5668 switch (group_type)
5669 {
5670 case GROUP_LDR:
5671 inst.relocs[0].type
5672 = (bfd_reloc_code_real_type) entry->ldr_code;
5673 break;
5674
5675 case GROUP_LDRS:
5676 inst.relocs[0].type
5677 = (bfd_reloc_code_real_type) entry->ldrs_code;
5678 break;
5679
5680 case GROUP_LDC:
5681 inst.relocs[0].type
5682 = (bfd_reloc_code_real_type) entry->ldc_code;
5683 break;
5684
5685 default:
5686 gas_assert (0);
5687 }
5688
5689 if (inst.relocs[0].type == 0)
5690 {
5691 inst.error = _("this group relocation is not allowed on this instruction");
5692 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5693 }
5694 }
5695 else
5696 {
5697 char *q = p;
5698
5699 if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
5700 return PARSE_OPERAND_FAIL;
5701 /* If the offset is 0, find out if it's a +0 or -0. */
5702 if (inst.relocs[0].exp.X_op == O_constant
5703 && inst.relocs[0].exp.X_add_number == 0)
5704 {
5705 skip_whitespace (q);
5706 if (*q == '#')
5707 {
5708 q++;
5709 skip_whitespace (q);
5710 }
5711 if (*q == '-')
5712 inst.operands[i].negative = 1;
5713 }
5714 }
5715 }
5716 }
5717 else if (skip_past_char (&p, ':') == SUCCESS)
5718 {
5719 /* FIXME: '@' should be used here, but it's filtered out by generic code
5720 before we get to see it here. This may be subject to change. */
5721 parse_operand_result result = parse_neon_alignment (&p, i);
5722
5723 if (result != PARSE_OPERAND_SUCCESS)
5724 return result;
5725 }
5726
5727 if (skip_past_char (&p, ']') == FAIL)
5728 {
5729 inst.error = _("']' expected");
5730 return PARSE_OPERAND_FAIL;
5731 }
5732
5733 if (skip_past_char (&p, '!') == SUCCESS)
5734 inst.operands[i].writeback = 1;
5735
5736 else if (skip_past_comma (&p) == SUCCESS)
5737 {
5738 if (skip_past_char (&p, '{') == SUCCESS)
5739 {
5740 /* [Rn], {expr} - unindexed, with option */
5741 if (parse_immediate (&p, &inst.operands[i].imm,
5742 0, 255, TRUE) == FAIL)
5743 return PARSE_OPERAND_FAIL;
5744
5745 if (skip_past_char (&p, '}') == FAIL)
5746 {
5747 inst.error = _("'}' expected at end of 'option' field");
5748 return PARSE_OPERAND_FAIL;
5749 }
5750 if (inst.operands[i].preind)
5751 {
5752 inst.error = _("cannot combine index with option");
5753 return PARSE_OPERAND_FAIL;
5754 }
5755 *str = p;
5756 return PARSE_OPERAND_SUCCESS;
5757 }
5758 else
5759 {
5760 inst.operands[i].postind = 1;
5761 inst.operands[i].writeback = 1;
5762
5763 if (inst.operands[i].preind)
5764 {
5765 inst.error = _("cannot combine pre- and post-indexing");
5766 return PARSE_OPERAND_FAIL;
5767 }
5768
5769 if (*p == '+') p++;
5770 else if (*p == '-') p++, inst.operands[i].negative = 1;
5771
5772 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5773 {
5774 /* We might be using the immediate for alignment already. If we
5775 are, OR the register number into the low-order bits. */
5776 if (inst.operands[i].immisalign)
5777 inst.operands[i].imm |= reg;
5778 else
5779 inst.operands[i].imm = reg;
5780 inst.operands[i].immisreg = 1;
5781
5782 if (skip_past_comma (&p) == SUCCESS)
5783 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5784 return PARSE_OPERAND_FAIL;
5785 }
5786 else
5787 {
5788 char *q = p;
5789
5790 if (inst.operands[i].negative)
5791 {
5792 inst.operands[i].negative = 0;
5793 p--;
5794 }
5795 if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
5796 return PARSE_OPERAND_FAIL;
5797 /* If the offset is 0, find out if it's a +0 or -0. */
5798 if (inst.relocs[0].exp.X_op == O_constant
5799 && inst.relocs[0].exp.X_add_number == 0)
5800 {
5801 skip_whitespace (q);
5802 if (*q == '#')
5803 {
5804 q++;
5805 skip_whitespace (q);
5806 }
5807 if (*q == '-')
5808 inst.operands[i].negative = 1;
5809 }
5810 }
5811 }
5812 }
5813
5814 /* If at this point neither .preind nor .postind is set, we have a
5815 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5816 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
5817 {
5818 inst.operands[i].preind = 1;
5819 inst.relocs[0].exp.X_op = O_constant;
5820 inst.relocs[0].exp.X_add_number = 0;
5821 }
5822 *str = p;
5823 return PARSE_OPERAND_SUCCESS;
5824 }
5825
5826 static int
5827 parse_address (char **str, int i)
5828 {
5829 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5830 ? SUCCESS : FAIL;
5831 }
5832
5833 static parse_operand_result
5834 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5835 {
5836 return parse_address_main (str, i, 1, type);
5837 }
5838
5839 /* Parse an operand for a MOVW or MOVT instruction. */
5840 static int
5841 parse_half (char **str)
5842 {
5843 char * p;
5844
5845 p = *str;
5846 skip_past_char (&p, '#');
5847 if (strncasecmp (p, ":lower16:", 9) == 0)
5848 inst.relocs[0].type = BFD_RELOC_ARM_MOVW;
5849 else if (strncasecmp (p, ":upper16:", 9) == 0)
5850 inst.relocs[0].type = BFD_RELOC_ARM_MOVT;
5851
5852 if (inst.relocs[0].type != BFD_RELOC_UNUSED)
5853 {
5854 p += 9;
5855 skip_whitespace (p);
5856 }
5857
5858 if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
5859 return FAIL;
5860
5861 if (inst.relocs[0].type == BFD_RELOC_UNUSED)
5862 {
5863 if (inst.relocs[0].exp.X_op != O_constant)
5864 {
5865 inst.error = _("constant expression expected");
5866 return FAIL;
5867 }
5868 if (inst.relocs[0].exp.X_add_number < 0
5869 || inst.relocs[0].exp.X_add_number > 0xffff)
5870 {
5871 inst.error = _("immediate value out of range");
5872 return FAIL;
5873 }
5874 }
5875 *str = p;
5876 return SUCCESS;
5877 }
5878
5879 /* Miscellaneous. */
5880
5881 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5882 or a bitmask suitable to be or-ed into the ARM msr instruction. */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  /* M-profile cores use a different set of special registers (looked up
     in arm_v7m_psr_hsh) than A/R-profile CPSR/SPSR.  */
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile: scan the whole identifier and look it up in the
	 v7m special-register table.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For the *psr register names, only match up to and including the
	 final 'r'/'R', so that any _<bits> suffix is left for the suffix
	 parsing below.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Skip over the 4-character "SPSR"/"CPSR"/"APSR" just matched.  */
  p += 4;
 check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  Each flag
	     letter may appear at most once; a repeat sets the 0x20 "bad"
	     marker which is rejected below.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v, q were given exactly once.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      /* The 'g' (GE flags) bit requires the DSP extension.  */
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicate flags, partial nzcvq sets, and duplicate g.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* CPSR/SPSR: look the suffix up as a named field set.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;	/* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6077
6078 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6079 value suitable for splatting into the AIF field of the instruction. */
6080
6081 static int
6082 parse_cps_flags (char **str)
6083 {
6084 int val = 0;
6085 int saw_a_flag = 0;
6086 char *s = *str;
6087
6088 for (;;)
6089 switch (*s++)
6090 {
6091 case '\0': case ',':
6092 goto done;
6093
6094 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6095 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6096 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6097
6098 default:
6099 inst.error = _("unrecognized CPS flag");
6100 return FAIL;
6101 }
6102
6103 done:
6104 if (saw_a_flag == 0)
6105 {
6106 inst.error = _("missing CPS flags");
6107 return FAIL;
6108 }
6109
6110 *str = s - 1;
6111 return val;
6112 }
6113
6114 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6115 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6116
6117 static int
6118 parse_endian_specifier (char **str)
6119 {
6120 int little_endian;
6121 char *s = *str;
6122
6123 if (strncasecmp (s, "BE", 2))
6124 little_endian = 0;
6125 else if (strncasecmp (s, "LE", 2))
6126 little_endian = 1;
6127 else
6128 {
6129 inst.error = _("valid endian specifiers are be or le");
6130 return FAIL;
6131 }
6132
6133 if (ISALNUM (s[2]) || s[2] == '_')
6134 {
6135 inst.error = _("valid endian specifiers are be or le");
6136 return FAIL;
6137 }
6138
6139 *str = s + 2;
6140 return little_endian;
6141 }
6142
6143 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6144 value suitable for poking into the rotate field of an sxt or sxta
6145 instruction, or FAIL on error. */
6146
6147 static int
6148 parse_ror (char **str)
6149 {
6150 int rot;
6151 char *s = *str;
6152
6153 if (strncasecmp (s, "ROR", 3) == 0)
6154 s += 3;
6155 else
6156 {
6157 inst.error = _("missing rotation field after comma");
6158 return FAIL;
6159 }
6160
6161 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6162 return FAIL;
6163
6164 switch (rot)
6165 {
6166 case 0: *str = s; return 0x0;
6167 case 8: *str = s; return 0x1;
6168 case 16: *str = s; return 0x2;
6169 case 24: *str = s; return 0x3;
6170
6171 default:
6172 inst.error = _("rotation can only be 0, 8, 16, or 24");
6173 return FAIL;
6174 }
6175 }
6176
6177 /* Parse a conditional code (from conds[] below). The value returned is in the
6178 range 0 .. 14, or FAIL. */
6179 static int
6180 parse_cond (char **str)
6181 {
6182 char *q;
6183 const struct asm_cond *c;
6184 int n;
6185 /* Condition codes are always 2 characters, so matching up to
6186 3 characters is sufficient. */
6187 char cond[3];
6188
6189 q = *str;
6190 n = 0;
6191 while (ISALPHA (*q) && n < 3)
6192 {
6193 cond[n] = TOLOWER (*q);
6194 q++;
6195 n++;
6196 }
6197
6198 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6199 if (!c)
6200 {
6201 inst.error = _("condition required");
6202 return FAIL;
6203 }
6204
6205 *str = q;
6206 return c->value;
6207 }
6208
6209 /* Record a use of the given feature. */
6210 static void
6211 record_feature_use (const arm_feature_set *feature)
6212 {
6213 if (thumb_mode)
6214 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6215 else
6216 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6217 }
6218
6219 /* If the given feature is currently allowed, mark it as used and return TRUE.
6220 Return FALSE otherwise. */
6221 static bfd_boolean
6222 mark_feature_used (const arm_feature_set *feature)
6223 {
6224 /* Ensure the option is currently allowed. */
6225 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6226 return FALSE;
6227
6228 /* Add the appropriate architecture feature for the barrier option used. */
6229 record_feature_use (feature);
6230
6231 return TRUE;
6232 }
6233
6234 /* Parse an option for a barrier instruction. Returns the encoding for the
6235 option, or FAIL. */
6236 static int
6237 parse_barrier (char **str)
6238 {
6239 char *p, *q;
6240 const struct asm_barrier_opt *o;
6241
6242 p = q = *str;
6243 while (ISALPHA (*q))
6244 q++;
6245
6246 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6247 q - p);
6248 if (!o)
6249 return FAIL;
6250
6251 if (!mark_feature_used (&o->arch))
6252 return FAIL;
6253
6254 *str = q;
6255 return o->value;
6256 }
6257
6258 /* Parse the operands of a table branch instruction. Similar to a memory
6259 operand. */
6260 static int
6261 parse_tb (char **str)
6262 {
6263 char * p = *str;
6264 int reg;
6265
6266 if (skip_past_char (&p, '[') == FAIL)
6267 {
6268 inst.error = _("'[' expected");
6269 return FAIL;
6270 }
6271
6272 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6273 {
6274 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6275 return FAIL;
6276 }
6277 inst.operands[0].reg = reg;
6278
6279 if (skip_past_comma (&p) == FAIL)
6280 {
6281 inst.error = _("',' expected");
6282 return FAIL;
6283 }
6284
6285 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6286 {
6287 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6288 return FAIL;
6289 }
6290 inst.operands[0].imm = reg;
6291
6292 if (skip_past_comma (&p) == SUCCESS)
6293 {
6294 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6295 return FAIL;
6296 if (inst.relocs[0].exp.X_add_number != 1)
6297 {
6298 inst.error = _("invalid shift");
6299 return FAIL;
6300 }
6301 inst.operands[0].shifted = 1;
6302 }
6303
6304 if (skip_past_char (&p, ']') == FAIL)
6305 {
6306 inst.error = _("']' expected");
6307 return FAIL;
6308 }
6309 *str = p;
6310 return SUCCESS;
6311 }
6312
6313 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6314 information on the types the operands can take and how they are encoded.
6315 Up to four operands may be read; this function handles setting the
6316 ".present" field for each read operand itself.
6317 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6318 else returns FAIL. */
6319
static int
parse_neon_mov (char **str, int *which_operand)
{
  /* I indexes inst.operands[]; it is post-incremented as each operand is
     recorded, and written back through WHICH_OPERAND on success.  */
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only): first operand is an S, D or Q
	 vector register; what follows determines which case applies.  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D register destination: a second core register follows
		 (case 5).  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two core registers follow the S-register pair.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is a core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: VMOV <Rd>, <Rn>, <Sm>, <Se> — a second
		 S register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13: VMOV <Rd>, <Sm>.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6535
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  parse_operands unpacks the pair:
   values >= 1<<16 select the high half in Thumb mode and the low half
   in ARM mode.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNSD,      /* Neon single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  /* New operands for Armv8.1-M Mainline.  */
  OP_LR,	/* ARM LR register */
  OP_RRnpcsp_I32, /* ARM register (no BadReg) or literal 1 .. 32 */

  OP_REGLST,	/* ARM register list */
  OP_CLRMLST,	/* CLRM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */

  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*                 0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  /* The "b" variants accept an immediate with or without a # prefix.  */
  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_EXPs,	/* same, with optional non-first operand relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  All codes from OP_FIRST_OPTIONAL onward may be
     absent; parse_operands uses this to decide where to backtrack.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oI32z,      /*                             0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oLR,	 /* ARM LR register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  OP_FIRST_OPTIONAL = OP_oI7b
};
6682
6683 /* Generic instruction operand parser. This does no encoding and no
6684 semantic validation; it merely squirrels values away in the inst
6685 structure. Returns SUCCESS or FAIL depending on whether the
6686 specified grammar matched. */
6687 static int
6688 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6689 {
6690 unsigned const int *upat = pattern;
6691 char *backtrack_pos = 0;
6692 const char *backtrack_error = 0;
6693 int i, val = 0, backtrack_index = 0;
6694 enum arm_reg_type rtype;
6695 parse_operand_result result;
6696 unsigned int op_parse_code;
6697
6698 #define po_char_or_fail(chr) \
6699 do \
6700 { \
6701 if (skip_past_char (&str, chr) == FAIL) \
6702 goto bad_args; \
6703 } \
6704 while (0)
6705
6706 #define po_reg_or_fail(regtype) \
6707 do \
6708 { \
6709 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6710 & inst.operands[i].vectype); \
6711 if (val == FAIL) \
6712 { \
6713 first_error (_(reg_expected_msgs[regtype])); \
6714 goto failure; \
6715 } \
6716 inst.operands[i].reg = val; \
6717 inst.operands[i].isreg = 1; \
6718 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6719 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6720 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6721 || rtype == REG_TYPE_VFD \
6722 || rtype == REG_TYPE_NQ); \
6723 } \
6724 while (0)
6725
6726 #define po_reg_or_goto(regtype, label) \
6727 do \
6728 { \
6729 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6730 & inst.operands[i].vectype); \
6731 if (val == FAIL) \
6732 goto label; \
6733 \
6734 inst.operands[i].reg = val; \
6735 inst.operands[i].isreg = 1; \
6736 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6737 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6738 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6739 || rtype == REG_TYPE_VFD \
6740 || rtype == REG_TYPE_NQ); \
6741 } \
6742 while (0)
6743
6744 #define po_imm_or_fail(min, max, popt) \
6745 do \
6746 { \
6747 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6748 goto failure; \
6749 inst.operands[i].imm = val; \
6750 } \
6751 while (0)
6752
6753 #define po_scalar_or_goto(elsz, label) \
6754 do \
6755 { \
6756 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6757 if (val == FAIL) \
6758 goto label; \
6759 inst.operands[i].reg = val; \
6760 inst.operands[i].isscalar = 1; \
6761 } \
6762 while (0)
6763
6764 #define po_misc_or_fail(expr) \
6765 do \
6766 { \
6767 if (expr) \
6768 goto failure; \
6769 } \
6770 while (0)
6771
6772 #define po_misc_or_fail_no_backtrack(expr) \
6773 do \
6774 { \
6775 result = expr; \
6776 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6777 backtrack_pos = 0; \
6778 if (result != PARSE_OPERAND_SUCCESS) \
6779 goto failure; \
6780 } \
6781 while (0)
6782
6783 #define po_barrier_or_imm(str) \
6784 do \
6785 { \
6786 val = parse_barrier (&str); \
6787 if (val == FAIL && ! ISALPHA (*str)) \
6788 goto immediate; \
6789 if (val == FAIL \
6790 /* ISB can only take SY as an option. */ \
6791 || ((inst.instruction & 0xf0) == 0x60 \
6792 && val != 0xf)) \
6793 { \
6794 inst.error = _("invalid barrier type"); \
6795 backtrack_pos = 0; \
6796 goto failure; \
6797 } \
6798 } \
6799 while (0)
6800
6801 skip_whitespace (str);
6802
6803 for (i = 0; upat[i] != OP_stop; i++)
6804 {
6805 op_parse_code = upat[i];
6806 if (op_parse_code >= 1<<16)
6807 op_parse_code = thumb ? (op_parse_code >> 16)
6808 : (op_parse_code & ((1<<16)-1));
6809
6810 if (op_parse_code >= OP_FIRST_OPTIONAL)
6811 {
6812 /* Remember where we are in case we need to backtrack. */
6813 gas_assert (!backtrack_pos);
6814 backtrack_pos = str;
6815 backtrack_error = inst.error;
6816 backtrack_index = i;
6817 }
6818
6819 if (i > 0 && (i > 1 || inst.operands[0].present))
6820 po_char_or_fail (',');
6821
6822 switch (op_parse_code)
6823 {
6824 /* Registers */
6825 case OP_oRRnpc:
6826 case OP_oRRnpcsp:
6827 case OP_RRnpc:
6828 case OP_RRnpcsp:
6829 case OP_oRR:
6830 case OP_LR:
6831 case OP_oLR:
6832 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6833 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6834 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6835 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6836 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6837 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6838 case OP_oRND:
6839 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6840 case OP_RVC:
6841 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6842 break;
6843 /* Also accept generic coprocessor regs for unknown registers. */
6844 coproc_reg:
6845 po_reg_or_fail (REG_TYPE_CN);
6846 break;
6847 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6848 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6849 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6850 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6851 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6852 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6853 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6854 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6855 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6856 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6857 case OP_oRNQ:
6858 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6859 case OP_RNSD: po_reg_or_fail (REG_TYPE_NSD); break;
6860 case OP_oRNDQ:
6861 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6862 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6863 case OP_oRNSDQ:
6864 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6865
6866 /* Neon scalar. Using an element size of 8 means that some invalid
6867 scalars are accepted here, so deal with those in later code. */
6868 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6869
6870 case OP_RNDQ_I0:
6871 {
6872 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6873 break;
6874 try_imm0:
6875 po_imm_or_fail (0, 0, TRUE);
6876 }
6877 break;
6878
6879 case OP_RVSD_I0:
6880 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6881 break;
6882
6883 case OP_RSVD_FI0:
6884 {
6885 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6886 break;
6887 try_ifimm0:
6888 if (parse_ifimm_zero (&str))
6889 inst.operands[i].imm = 0;
6890 else
6891 {
6892 inst.error
6893 = _("only floating point zero is allowed as immediate value");
6894 goto failure;
6895 }
6896 }
6897 break;
6898
6899 case OP_RR_RNSC:
6900 {
6901 po_scalar_or_goto (8, try_rr);
6902 break;
6903 try_rr:
6904 po_reg_or_fail (REG_TYPE_RN);
6905 }
6906 break;
6907
6908 case OP_RNSDQ_RNSC:
6909 {
6910 po_scalar_or_goto (8, try_nsdq);
6911 break;
6912 try_nsdq:
6913 po_reg_or_fail (REG_TYPE_NSDQ);
6914 }
6915 break;
6916
6917 case OP_RNSD_RNSC:
6918 {
6919 po_scalar_or_goto (8, try_s_scalar);
6920 break;
6921 try_s_scalar:
6922 po_scalar_or_goto (4, try_nsd);
6923 break;
6924 try_nsd:
6925 po_reg_or_fail (REG_TYPE_NSD);
6926 }
6927 break;
6928
6929 case OP_RNDQ_RNSC:
6930 {
6931 po_scalar_or_goto (8, try_ndq);
6932 break;
6933 try_ndq:
6934 po_reg_or_fail (REG_TYPE_NDQ);
6935 }
6936 break;
6937
6938 case OP_RND_RNSC:
6939 {
6940 po_scalar_or_goto (8, try_vfd);
6941 break;
6942 try_vfd:
6943 po_reg_or_fail (REG_TYPE_VFD);
6944 }
6945 break;
6946
6947 case OP_VMOV:
6948 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6949 not careful then bad things might happen. */
6950 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6951 break;
6952
6953 case OP_RNDQ_Ibig:
6954 {
6955 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6956 break;
6957 try_immbig:
6958 /* There's a possibility of getting a 64-bit immediate here, so
6959 we need special handling. */
6960 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6961 == FAIL)
6962 {
6963 inst.error = _("immediate value is out of range");
6964 goto failure;
6965 }
6966 }
6967 break;
6968
6969 case OP_RNDQ_I63b:
6970 {
6971 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6972 break;
6973 try_shimm:
6974 po_imm_or_fail (0, 63, TRUE);
6975 }
6976 break;
6977
6978 case OP_RRnpcb:
6979 po_char_or_fail ('[');
6980 po_reg_or_fail (REG_TYPE_RN);
6981 po_char_or_fail (']');
6982 break;
6983
6984 case OP_RRnpctw:
6985 case OP_RRw:
6986 case OP_oRRw:
6987 po_reg_or_fail (REG_TYPE_RN);
6988 if (skip_past_char (&str, '!') == SUCCESS)
6989 inst.operands[i].writeback = 1;
6990 break;
6991
6992 /* Immediates */
6993 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6994 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6995 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6996 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6997 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6998 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6999 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
7000 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
7001 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
7002 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
7003 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
7004 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
7005
7006 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
7007 case OP_oI7b:
7008 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
7009 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
7010 case OP_oI31b:
7011 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
7012 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
7013 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
7014 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
7015
7016 /* Immediate variants */
7017 case OP_oI255c:
7018 po_char_or_fail ('{');
7019 po_imm_or_fail (0, 255, TRUE);
7020 po_char_or_fail ('}');
7021 break;
7022
7023 case OP_I31w:
7024 /* The expression parser chokes on a trailing !, so we have
7025 to find it first and zap it. */
7026 {
7027 char *s = str;
7028 while (*s && *s != ',')
7029 s++;
7030 if (s[-1] == '!')
7031 {
7032 s[-1] = '\0';
7033 inst.operands[i].writeback = 1;
7034 }
7035 po_imm_or_fail (0, 31, TRUE);
7036 if (str == s - 1)
7037 str = s;
7038 }
7039 break;
7040
7041 /* Expressions */
7042 case OP_EXPi: EXPi:
7043 po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7044 GE_OPT_PREFIX));
7045 break;
7046
7047 case OP_EXP:
7048 po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7049 GE_NO_PREFIX));
7050 break;
7051
7052 case OP_EXPr: EXPr:
7053 po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7054 GE_NO_PREFIX));
7055 if (inst.relocs[0].exp.X_op == O_symbol)
7056 {
7057 val = parse_reloc (&str);
7058 if (val == -1)
7059 {
7060 inst.error = _("unrecognized relocation suffix");
7061 goto failure;
7062 }
7063 else if (val != BFD_RELOC_UNUSED)
7064 {
7065 inst.operands[i].imm = val;
7066 inst.operands[i].hasreloc = 1;
7067 }
7068 }
7069 break;
7070
7071 case OP_EXPs:
7072 po_misc_or_fail (my_get_expression (&inst.relocs[i].exp, &str,
7073 GE_NO_PREFIX));
7074 if (inst.relocs[i].exp.X_op == O_symbol)
7075 {
7076 inst.operands[i].hasreloc = 1;
7077 }
7078 else if (inst.relocs[i].exp.X_op == O_constant)
7079 {
7080 inst.operands[i].imm = inst.relocs[i].exp.X_add_number;
7081 inst.operands[i].hasreloc = 0;
7082 }
7083 break;
7084
7085 /* Operand for MOVW or MOVT. */
7086 case OP_HALF:
7087 po_misc_or_fail (parse_half (&str));
7088 break;
7089
7090 /* Register or expression. */
7091 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
7092 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
7093
7094 /* Register or immediate. */
7095 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
7096 I0: po_imm_or_fail (0, 0, FALSE); break;
7097
7098 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
7099 IF:
7100 if (!is_immediate_prefix (*str))
7101 goto bad_args;
7102 str++;
7103 val = parse_fpa_immediate (&str);
7104 if (val == FAIL)
7105 goto failure;
7106 /* FPA immediates are encoded as registers 8-15.
7107 parse_fpa_immediate has already applied the offset. */
7108 inst.operands[i].reg = val;
7109 inst.operands[i].isreg = 1;
7110 break;
7111
7112 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
7113 I32z: po_imm_or_fail (0, 32, FALSE); break;
7114
7115 /* Two kinds of register. */
7116 case OP_RIWR_RIWC:
7117 {
7118 struct reg_entry *rege = arm_reg_parse_multi (&str);
7119 if (!rege
7120 || (rege->type != REG_TYPE_MMXWR
7121 && rege->type != REG_TYPE_MMXWC
7122 && rege->type != REG_TYPE_MMXWCG))
7123 {
7124 inst.error = _("iWMMXt data or control register expected");
7125 goto failure;
7126 }
7127 inst.operands[i].reg = rege->number;
7128 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
7129 }
7130 break;
7131
7132 case OP_RIWC_RIWG:
7133 {
7134 struct reg_entry *rege = arm_reg_parse_multi (&str);
7135 if (!rege
7136 || (rege->type != REG_TYPE_MMXWC
7137 && rege->type != REG_TYPE_MMXWCG))
7138 {
7139 inst.error = _("iWMMXt control register expected");
7140 goto failure;
7141 }
7142 inst.operands[i].reg = rege->number;
7143 inst.operands[i].isreg = 1;
7144 }
7145 break;
7146
7147 /* Misc */
7148 case OP_CPSF: val = parse_cps_flags (&str); break;
7149 case OP_ENDI: val = parse_endian_specifier (&str); break;
7150 case OP_oROR: val = parse_ror (&str); break;
7151 case OP_COND: val = parse_cond (&str); break;
7152 case OP_oBARRIER_I15:
7153 po_barrier_or_imm (str); break;
7154 immediate:
7155 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
7156 goto failure;
7157 break;
7158
7159 case OP_wPSR:
7160 case OP_rPSR:
7161 po_reg_or_goto (REG_TYPE_RNB, try_psr);
7162 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7163 {
7164 inst.error = _("Banked registers are not available with this "
7165 "architecture.");
7166 goto failure;
7167 }
7168 break;
7169 try_psr:
7170 val = parse_psr (&str, op_parse_code == OP_wPSR);
7171 break;
7172
7173 case OP_APSR_RR:
7174 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7175 break;
7176 try_apsr:
7177 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7178 instruction). */
7179 if (strncasecmp (str, "APSR_", 5) == 0)
7180 {
7181 unsigned found = 0;
7182 str += 5;
7183 while (found < 15)
7184 switch (*str++)
7185 {
7186 case 'c': found = (found & 1) ? 16 : found | 1; break;
7187 case 'n': found = (found & 2) ? 16 : found | 2; break;
7188 case 'z': found = (found & 4) ? 16 : found | 4; break;
7189 case 'v': found = (found & 8) ? 16 : found | 8; break;
7190 default: found = 16;
7191 }
7192 if (found != 15)
7193 goto failure;
7194 inst.operands[i].isvec = 1;
7195 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7196 inst.operands[i].reg = REG_PC;
7197 }
7198 else
7199 goto failure;
7200 break;
7201
7202 case OP_TB:
7203 po_misc_or_fail (parse_tb (&str));
7204 break;
7205
7206 /* Register lists. */
7207 case OP_REGLST:
7208 val = parse_reg_list (&str, REGLIST_RN);
7209 if (*str == '^')
7210 {
7211 inst.operands[i].writeback = 1;
7212 str++;
7213 }
7214 break;
7215
7216 case OP_CLRMLST:
7217 val = parse_reg_list (&str, REGLIST_CLRM);
7218 break;
7219
7220 case OP_VRSLST:
7221 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7222 break;
7223
7224 case OP_VRDLST:
7225 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7226 break;
7227
7228 case OP_VRSDLST:
7229 /* Allow Q registers too. */
7230 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7231 REGLIST_NEON_D);
7232 if (val == FAIL)
7233 {
7234 inst.error = NULL;
7235 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7236 REGLIST_VFP_S);
7237 inst.operands[i].issingle = 1;
7238 }
7239 break;
7240
7241 case OP_NRDLST:
7242 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7243 REGLIST_NEON_D);
7244 break;
7245
7246 case OP_NSTRLST:
7247 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7248 &inst.operands[i].vectype);
7249 break;
7250
7251 /* Addressing modes */
7252 case OP_ADDR:
7253 po_misc_or_fail (parse_address (&str, i));
7254 break;
7255
7256 case OP_ADDRGLDR:
7257 po_misc_or_fail_no_backtrack (
7258 parse_address_group_reloc (&str, i, GROUP_LDR));
7259 break;
7260
7261 case OP_ADDRGLDRS:
7262 po_misc_or_fail_no_backtrack (
7263 parse_address_group_reloc (&str, i, GROUP_LDRS));
7264 break;
7265
7266 case OP_ADDRGLDC:
7267 po_misc_or_fail_no_backtrack (
7268 parse_address_group_reloc (&str, i, GROUP_LDC));
7269 break;
7270
7271 case OP_SH:
7272 po_misc_or_fail (parse_shifter_operand (&str, i));
7273 break;
7274
7275 case OP_SHG:
7276 po_misc_or_fail_no_backtrack (
7277 parse_shifter_operand_group_reloc (&str, i));
7278 break;
7279
7280 case OP_oSHll:
7281 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7282 break;
7283
7284 case OP_oSHar:
7285 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7286 break;
7287
7288 case OP_oSHllar:
7289 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7290 break;
7291
7292 default:
7293 as_fatal (_("unhandled operand code %d"), op_parse_code);
7294 }
7295
7296 /* Various value-based sanity checks and shared operations. We
7297 do not signal immediate failures for the register constraints;
7298 this allows a syntax error to take precedence. */
7299 switch (op_parse_code)
7300 {
7301 case OP_oRRnpc:
7302 case OP_RRnpc:
7303 case OP_RRnpcb:
7304 case OP_RRw:
7305 case OP_oRRw:
7306 case OP_RRnpc_I0:
7307 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7308 inst.error = BAD_PC;
7309 break;
7310
7311 case OP_oRRnpcsp:
7312 case OP_RRnpcsp:
7313 if (inst.operands[i].isreg)
7314 {
7315 if (inst.operands[i].reg == REG_PC)
7316 inst.error = BAD_PC;
7317 else if (inst.operands[i].reg == REG_SP
7318 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7319 relaxed since ARMv8-A. */
7320 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
7321 {
7322 gas_assert (thumb);
7323 inst.error = BAD_SP;
7324 }
7325 }
7326 break;
7327
7328 case OP_RRnpctw:
7329 if (inst.operands[i].isreg
7330 && inst.operands[i].reg == REG_PC
7331 && (inst.operands[i].writeback || thumb))
7332 inst.error = BAD_PC;
7333 break;
7334
7335 case OP_CPSF:
7336 case OP_ENDI:
7337 case OP_oROR:
7338 case OP_wPSR:
7339 case OP_rPSR:
7340 case OP_COND:
7341 case OP_oBARRIER_I15:
7342 case OP_REGLST:
7343 case OP_CLRMLST:
7344 case OP_VRSLST:
7345 case OP_VRDLST:
7346 case OP_VRSDLST:
7347 case OP_NRDLST:
7348 case OP_NSTRLST:
7349 if (val == FAIL)
7350 goto failure;
7351 inst.operands[i].imm = val;
7352 break;
7353
7354 case OP_LR:
7355 case OP_oLR:
7356 if (inst.operands[i].reg != REG_LR)
7357 inst.error = _("operand must be LR register");
7358 break;
7359
7360 default:
7361 break;
7362 }
7363
7364 /* If we get here, this operand was successfully parsed. */
7365 inst.operands[i].present = 1;
7366 continue;
7367
7368 bad_args:
7369 inst.error = BAD_ARGS;
7370
7371 failure:
7372 if (!backtrack_pos)
7373 {
7374 /* The parse routine should already have set inst.error, but set a
7375 default here just in case. */
7376 if (!inst.error)
7377 inst.error = _("syntax error");
7378 return FAIL;
7379 }
7380
7381 /* Do not backtrack over a trailing optional argument that
7382 absorbed some text. We will only fail again, with the
7383 'garbage following instruction' error message, which is
7384 probably less helpful than the current one. */
7385 if (backtrack_index == i && backtrack_pos != str
7386 && upat[i+1] == OP_stop)
7387 {
7388 if (!inst.error)
7389 inst.error = _("syntax error");
7390 return FAIL;
7391 }
7392
7393 /* Try again, skipping the optional argument at backtrack_pos. */
7394 str = backtrack_pos;
7395 inst.error = backtrack_error;
7396 inst.operands[backtrack_index].present = 0;
7397 i = backtrack_index;
7398 backtrack_pos = 0;
7399 }
7400
7401 /* Check that we have parsed all the arguments. */
7402 if (*str != '\0' && !inst.error)
7403 inst.error = _("garbage following instruction");
7404
7405 return inst.error ? FAIL : SUCCESS;
7406 }
7407
/* Remove the operand-parsing helper macros now that parse_operands is
   done with them.  Note: the scalar helper is named po_scalar_or_goto
   (the old po_scalar_or_fail name no longer exists), so undef that.  */
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_goto
#undef po_barrier_or_imm
7414
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR in inst.error and return from the
   enclosing (void) function.  EXPR is evaluated exactly once.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7426
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.

   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
   places, while the restriction on REG_SP was relaxed since ARMv8-A.

   On rejection this sets inst.error and returns from the enclosing
   (void) function.  REG may be evaluated more than once, so pass a
   side-effect-free expression.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_PC)					\
     {							\
       inst.error = BAD_PC;				\
       return;						\
     }							\
   else if (reg == REG_SP				\
	    && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
     {							\
       inst.error = BAD_SP;				\
       return;						\
     }							\
  while (0)
7447
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  REG may be evaluated more than once.  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
       as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)

/* Functions for operand encoding.  ARM, then Thumb.  */

/* 32-bit rotate-left of V by N.  N is masked to [0,31]; V is evaluated
   twice, so pass a side-effect-free expression.  */
#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7459
7460 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7461
7462 The only binary encoding difference is the Coprocessor number. Coprocessor
7463 9 is used for half-precision calculations or conversions. The format of the
7464 instruction is the same as the equivalent Coprocessor 10 instruction that
7465 exists for Single-Precision operation. */
7466
7467 static void
7468 do_scalar_fp16_v82_encode (void)
7469 {
7470 if (inst.cond != COND_ALWAYS)
7471 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7472 " the behaviour is UNPREDICTABLE"));
7473 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
7474 _(BAD_FP16));
7475
7476 inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
7477 mark_feature_used (&arm_ext_fp16);
7478 }
7479
7480 /* If VAL can be encoded in the immediate field of an ARM instruction,
7481 return the encoded form. Otherwise, return FAIL. */
7482
7483 static unsigned int
7484 encode_arm_immediate (unsigned int val)
7485 {
7486 unsigned int a, i;
7487
7488 if (val <= 0xff)
7489 return val;
7490
7491 for (i = 2; i < 32; i += 2)
7492 if ((a = rotate_left (val, i)) <= 0xff)
7493 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7494
7495 return FAIL;
7496 }
7497
7498 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7499 return the encoded form. Otherwise, return FAIL. */
7500 static unsigned int
7501 encode_thumb32_immediate (unsigned int val)
7502 {
7503 unsigned int a, i;
7504
7505 if (val <= 0xff)
7506 return val;
7507
7508 for (i = 1; i <= 24; i++)
7509 {
7510 a = val >> i;
7511 if ((val & ~(0xff << i)) == 0)
7512 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7513 }
7514
7515 a = val & 0xff;
7516 if (val == ((a << 16) | a))
7517 return 0x100 | a;
7518 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7519 return 0x300 | a;
7520
7521 a = val & 0xff00;
7522 if (val == ((a << 16) | a))
7523 return 0x200 | (a >> 8);
7524
7525 return FAIL;
7526 }
7527 /* Encode a VFP SP or DP register number into inst.instruction. */
7528
7529 static void
7530 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
7531 {
7532 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
7533 && reg > 15)
7534 {
7535 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
7536 {
7537 if (thumb_mode)
7538 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
7539 fpu_vfp_ext_d32);
7540 else
7541 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
7542 fpu_vfp_ext_d32);
7543 }
7544 else
7545 {
7546 first_error (_("D register out of range for selected VFP version"));
7547 return;
7548 }
7549 }
7550
7551 switch (pos)
7552 {
7553 case VFP_REG_Sd:
7554 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
7555 break;
7556
7557 case VFP_REG_Sn:
7558 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
7559 break;
7560
7561 case VFP_REG_Sm:
7562 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
7563 break;
7564
7565 case VFP_REG_Dd:
7566 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7567 break;
7568
7569 case VFP_REG_Dn:
7570 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7571 break;
7572
7573 case VFP_REG_Dm:
7574 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7575 break;
7576
7577 default:
7578 abort ();
7579 }
7580 }
7581
7582 /* Encode a <shift> in an ARM-format instruction. The immediate,
7583 if any, is handled by md_apply_fix. */
7584 static void
7585 encode_arm_shift (int i)
7586 {
7587 /* register-shifted register. */
7588 if (inst.operands[i].immisreg)
7589 {
7590 int op_index;
7591 for (op_index = 0; op_index <= i; ++op_index)
7592 {
7593 /* Check the operand only when it's presented. In pre-UAL syntax,
7594 if the destination register is the same as the first operand, two
7595 register form of the instruction can be used. */
7596 if (inst.operands[op_index].present && inst.operands[op_index].isreg
7597 && inst.operands[op_index].reg == REG_PC)
7598 as_warn (UNPRED_REG ("r15"));
7599 }
7600
7601 if (inst.operands[i].imm == REG_PC)
7602 as_warn (UNPRED_REG ("r15"));
7603 }
7604
7605 if (inst.operands[i].shift_kind == SHIFT_RRX)
7606 inst.instruction |= SHIFT_ROR << 5;
7607 else
7608 {
7609 inst.instruction |= inst.operands[i].shift_kind << 5;
7610 if (inst.operands[i].immisreg)
7611 {
7612 inst.instruction |= SHIFT_BY_REG;
7613 inst.instruction |= inst.operands[i].imm << 8;
7614 }
7615 else
7616 inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
7617 }
7618 }
7619
7620 static void
7621 encode_arm_shifter_operand (int i)
7622 {
7623 if (inst.operands[i].isreg)
7624 {
7625 inst.instruction |= inst.operands[i].reg;
7626 encode_arm_shift (i);
7627 }
7628 else
7629 {
7630 inst.instruction |= INST_IMMEDIATE;
7631 if (inst.relocs[0].type != BFD_RELOC_ARM_IMMEDIATE)
7632 inst.instruction |= inst.operands[i].imm;
7633 }
7634 }
7635
7636 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7637 static void
7638 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
7639 {
7640 /* PR 14260:
7641 Generate an error if the operand is not a register. */
7642 constraint (!inst.operands[i].isreg,
7643 _("Instruction does not support =N addresses"));
7644
7645 inst.instruction |= inst.operands[i].reg << 16;
7646
7647 if (inst.operands[i].preind)
7648 {
7649 if (is_t)
7650 {
7651 inst.error = _("instruction does not accept preindexed addressing");
7652 return;
7653 }
7654 inst.instruction |= PRE_INDEX;
7655 if (inst.operands[i].writeback)
7656 inst.instruction |= WRITE_BACK;
7657
7658 }
7659 else if (inst.operands[i].postind)
7660 {
7661 gas_assert (inst.operands[i].writeback);
7662 if (is_t)
7663 inst.instruction |= WRITE_BACK;
7664 }
7665 else /* unindexed - only for coprocessor */
7666 {
7667 inst.error = _("instruction does not accept unindexed addressing");
7668 return;
7669 }
7670
7671 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
7672 && (((inst.instruction & 0x000f0000) >> 16)
7673 == ((inst.instruction & 0x0000f000) >> 12)))
7674 as_warn ((inst.instruction & LOAD_BIT)
7675 ? _("destination register same as write-back base")
7676 : _("source register same as write-back base"));
7677 }
7678
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  /* Whether the base register is PC; PC-relative forms face extra
     restrictions below.  */
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  /* Encode the base register and pre/post-index and write-back bits;
     this may set inst.error.  */
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: [Rn, +/-Rm {, shift}].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    /* RRX is encoded as ROR with a zero shift amount.  */
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      /* The shift amount is filled in by the fixup machinery.  */
	      inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.relocs[0] */
    {
      if (is_pc && !inst.relocs[0].pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7738
7739 /* inst.operands[i] was set up by parse_address. Encode it into an
7740 ARM-format mode 3 load or store instruction. Reject forms that
7741 cannot be used with such instructions. If is_t is true, reject
7742 forms that cannot be used with a T instruction (i.e. not
7743 post-indexed). */
7744 static void
7745 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
7746 {
7747 if (inst.operands[i].immisreg && inst.operands[i].shifted)
7748 {
7749 inst.error = _("instruction does not accept scaled register index");
7750 return;
7751 }
7752
7753 encode_arm_addr_mode_common (i, is_t);
7754
7755 if (inst.operands[i].immisreg)
7756 {
7757 constraint ((inst.operands[i].imm == REG_PC
7758 || (is_t && inst.operands[i].reg == REG_PC)),
7759 BAD_PC_ADDRESSING);
7760 constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
7761 BAD_PC_WRITEBACK);
7762 inst.instruction |= inst.operands[i].imm;
7763 if (!inst.operands[i].negative)
7764 inst.instruction |= INDEX_UP;
7765 }
7766 else /* immediate offset in inst.relocs[0] */
7767 {
7768 constraint ((inst.operands[i].reg == REG_PC && !inst.relocs[0].pc_rel
7769 && inst.operands[i].writeback),
7770 BAD_PC_WRITEBACK);
7771 inst.instruction |= HWOFFSET_IMM;
7772 if (inst.relocs[0].type == BFD_RELOC_UNUSED)
7773 {
7774 /* Prefer + for zero encoded value. */
7775 if (!inst.operands[i].negative)
7776 inst.instruction |= INDEX_UP;
7777
7778 inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM8;
7779 }
7780 }
7781 }
7782
7783 /* Write immediate bits [7:0] to the following locations:
7784
7785 |28/24|23 19|18 16|15 4|3 0|
7786 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7787
7788 This function is used by VMOV/VMVN/VORR/VBIC. */
7789
7790 static void
7791 neon_write_immbits (unsigned immbits)
7792 {
7793 inst.instruction |= immbits & 0xf;
7794 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7795 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7796 }
7797
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is treated as zero and not written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      lo = ~lo & 0xff;
      break;

    case 16:
      lo = ~lo & 0xffff;
      break;

    case 64:
      hi = ~hi & 0xffffffff;
      /* fall through.  */

    case 32:
      lo = ~lo & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
7834
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D, i.e. every byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int shift;

  for (shift = 0; shift < 32; shift += 8)
    {
      unsigned byte = (imm >> shift) & 0xff;
      if (byte != 0 && byte != 0xff)
	return 0;
    }

  return 1;
}
7846
/* For immediate of above form, return 0bABCD: gather the low bit of
   each byte of IMM into one nibble.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    if (imm & (1u << (byte * 8)))
      result |= 1u << byte;

  return result;
}
7855
/* Compress quarter-float representation to 0b...000 abcdefgh: the sign
   (bit 31) becomes bit 7, bits 25-19 become bits 6-0.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned sign_bit = (imm >> 24) & 0x80;
  unsigned exp_frac = (imm >> 19) & 0x7f;

  return sign_bit | exp_frac;
}
7863
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  Returns FAIL when the immediate is not
   representable.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-precision float constant: only for 32-bit elements and the
     non-inverted (*OP == 0) form; encoded with cmode 0xF.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* 64-bit pattern whose every byte is 0x00 or 0xff: cmode 0xE with
	 OP forced to 1 (the "MOV I64" special case described above).  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Other 64-bit constants are only representable when both halves
	 match; if so, fall through and retry at 32 bits.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* A single non-zero byte in any of the four positions:
	 cmode 0x0/0x2/0x4/0x6.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* One byte with all-ones below it: cmode 0xC/0xD.  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Retry at 16 bits when both halfwords match.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* A single non-zero byte within a halfword: cmode 0x8/0xA.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Retry at 8 bits when both bytes match.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
7973
7974 #if defined BFD_HOST_64_BIT
7975 /* Returns TRUE if double precision value V may be cast
7976 to single precision without loss of accuracy. */
7977
7978 static bfd_boolean
7979 is_double_a_single (bfd_int64_t v)
7980 {
7981 int exp = (int)((v >> 52) & 0x7FF);
7982 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7983
7984 return (exp == 0 || exp == 0x7FF
7985 || (exp >= 1023 - 126 && exp <= 1023 + 127))
7986 && (mantissa & 0x1FFFFFFFl) == 0;
7987 }
7988
7989 /* Returns a double precision value casted to single precision
7990 (ignoring the least significant bits in exponent and mantissa). */
7991
7992 static int
7993 double_to_single (bfd_int64_t v)
7994 {
7995 int sign = (int) ((v >> 63) & 1l);
7996 int exp = (int) ((v >> 52) & 0x7FF);
7997 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7998
7999 if (exp == 0x7FF)
8000 exp = 0xFF;
8001 else
8002 {
8003 exp = exp - 1023 + 127;
8004 if (exp >= 0xFF)
8005 {
8006 /* Infinity. */
8007 exp = 0x7F;
8008 mantissa = 0;
8009 }
8010 else if (exp < 0)
8011 {
8012 /* No denormalized numbers. */
8013 exp = 0;
8014 mantissa = 0;
8015 }
8016 }
8017 mantissa >>= 29;
8018 return (sign << 31) | (exp << 23) | mantissa;
8019 }
8020 #endif /* BFD_HOST_64_BIT */
8021
/* Classification of a "=constant" literal load for move_or_literal_pool:
   which instruction set / register file the load targets.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb LDR pseudo.  */
  CONST_ARM,	/* ARM LDR pseudo.  */
  CONST_VEC	/* Vector (VLDR) pseudo.  */
};
8028
8029 static void do_vfp_nsyn_opcode (const char *);
8030
8031 /* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
8032 Determine whether it can be performed with a move instruction; if
8033 it can, convert inst.instruction to that move instruction and
8034 return TRUE; if it can't, convert inst.instruction to a literal-pool
8035 load and return FALSE. If this is not a valid thing to do in the
8036 current context, set inst.error and return TRUE.
8037
8038 inst.operands[i] describes the destination register. */
8039
static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p   = (t == CONST_ARM);

  /* Pick the load bit appropriate to the encoding width: 32-bit Thumb
     opcodes keep it in a different position than 16-bit ones.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* "=expr" only makes sense on a load; a store with this syntax is an
     error.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op != O_constant
      && inst.relocs[0].exp.X_op != O_symbol
      && inst.relocs[0].exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op == O_constant
      || inst.relocs[0].exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.relocs[0].exp.X_op == O_big)
	{
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.relocs[0].exp.X_add_number == -1)
	    {
	      /* Convert a floating-point bignum to its bit pattern.  */
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

	  /* Reassemble the value from LITTLENUM_NUMBER_OF_BITS-wide
	     pieces, as many as fit the host's widest integer.  */
#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	    | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.relocs[0].exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* LDR must not turn into a flag-setting (MOVS) instruction,
		 so we do not consider whether MOVS could encode the
		 constant.  */

	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		   || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		  && inst.operands[i].reg != 13
		  && inst.operands[i].reg != 15)
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      /* Try the bitwise complement: loadable via MVN.  */
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000  /*  MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      /* Scatter the 12-bit modified immediate over the
			 i:imm3:imm8 fields of the T32 encoding.  */
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      /* imm4:i:imm3:imm8 field layout.  */
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      /* Upper 32 bits: either given explicitly, zero for unsigned
		 constants, or the sign extension of the low half.  */
	      unsigned immhi = inst.operands[1].regisimm
		? inst.operands[1].reg
		: inst.relocs[0].exp.X_unsigned
		? 0
		: ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  /* Retry with the inverted constant (VMVN form).  */
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
		    | (1 << 23)
		    | (cmode << 8)
		    | (op << 5)
		    | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move variant was applicable: place the constant in the literal
     pool and rewrite the operand as a PC-relative load.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.relocs[0].pc_rel = 1;
  inst.relocs[0].type = (thumb_p
			 ? BFD_RELOC_ARM_THUMB_OFFSET
			 : (mode_3
			    ? BFD_RELOC_ARM_HWLITERAL
			    : BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8275
8276 /* inst.operands[i] was set up by parse_address. Encode it into an
8277 ARM-format instruction. Reject all forms which cannot be encoded
8278 into a coprocessor load/store instruction. If wb_ok is false,
8279 reject use of writeback; if unind_ok is false, reject use of
8280 unindexed addressing. If reloc_override is not 0, use it instead
8281 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8282 (in which case it is preserved). */
8283
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256: a constant address is only acceptable for the vector
	 forms, where it can be turned into a literal-pool load.  */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  /* Base register Rn.  */
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form carries the 8-bit option in the immediate field.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Select the offset relocation: an explicit override wins, except that
     group relocations (ALU_PC_G0_NC .. LDC_SB_G2) and LDR_PC_G0 chosen by
     the parser are preserved.  */
  if (reloc_override)
    inst.relocs[0].type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.relocs[0].type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.relocs[0].type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.relocs[0].type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.relocs[0].type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.relocs[0].type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8352
8353 /* Functions for instruction encoding, sorted by sub-architecture.
8354 First some generics; their names are taken from the conventional
8355 bit positions for register arguments in ARM format instructions. */
8356
/* Encoder for instructions with no operands: the opcode table entry is
   already the complete instruction, so nothing needs to be added.  */
static void
do_noargs (void)
{
}
8361
8362 static void
8363 do_rd (void)
8364 {
8365 inst.instruction |= inst.operands[0].reg << 12;
8366 }
8367
8368 static void
8369 do_rn (void)
8370 {
8371 inst.instruction |= inst.operands[0].reg << 16;
8372 }
8373
8374 static void
8375 do_rd_rm (void)
8376 {
8377 inst.instruction |= inst.operands[0].reg << 12;
8378 inst.instruction |= inst.operands[1].reg;
8379 }
8380
8381 static void
8382 do_rm_rn (void)
8383 {
8384 inst.instruction |= inst.operands[0].reg;
8385 inst.instruction |= inst.operands[1].reg << 16;
8386 }
8387
8388 static void
8389 do_rd_rn (void)
8390 {
8391 inst.instruction |= inst.operands[0].reg << 12;
8392 inst.instruction |= inst.operands[1].reg << 16;
8393 }
8394
8395 static void
8396 do_rn_rd (void)
8397 {
8398 inst.instruction |= inst.operands[0].reg << 16;
8399 inst.instruction |= inst.operands[1].reg << 12;
8400 }
8401
8402 static void
8403 do_tt (void)
8404 {
8405 inst.instruction |= inst.operands[0].reg << 8;
8406 inst.instruction |= inst.operands[1].reg << 16;
8407 }
8408
8409 static bfd_boolean
8410 check_obsolete (const arm_feature_set *feature, const char *msg)
8411 {
8412 if (ARM_CPU_IS_ANY (cpu_variant))
8413 {
8414 as_tsktsk ("%s", msg);
8415 return TRUE;
8416 }
8417 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8418 {
8419 as_bad ("%s", msg);
8420 return TRUE;
8421 }
8422
8423 return FALSE;
8424 }
8425
/* Encode Rd (bit 12), Rm (bit 0) and Rn (bit 16), with extra checks for
   SWP/SWPB, which share this operand pattern.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      /* SWP's base must be distinct from both transfer registers.  */
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8449
8450 static void
8451 do_rd_rn_rm (void)
8452 {
8453 inst.instruction |= inst.operands[0].reg << 12;
8454 inst.instruction |= inst.operands[1].reg << 16;
8455 inst.instruction |= inst.operands[2].reg;
8456 }
8457
/* Encode Rm (bit 0), Rd (bit 12) and Rn (bit 16).  The third operand is
   an address: the base must not be PC and any offset expression must be
   exactly zero.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* O_illegal means no offset expression was parsed at all, which is
     also acceptable; anything else must be a constant zero.  */
  constraint (((inst.relocs[0].exp.X_op != O_constant
		&& inst.relocs[0].exp.X_op != O_illegal)
	       || inst.relocs[0].exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8470
8471 static void
8472 do_imm0 (void)
8473 {
8474 inst.instruction |= inst.operands[0].imm;
8475 }
8476
/* Encode Rd (bit 12) followed by a coprocessor-style address operand
   (writeback and unindexed forms both allowed).  */
static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
8483
8484 /* ARM instructions, in alphabetical order by function name (except
8485 that wrapper functions appear immediately after the function they
8486 wrap). */
8487
8488 /* This is a pseudo-op of the form "adr rd, label" to be converted
8489 into a relative address of the form "add rd, pc, #label-.-8". */
8490
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* Compensate for the PC reading 8 bytes ahead of the instruction in
     ARM state.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* When interworking, set the Thumb bit of an address that names a
     defined Thumb function.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
8509
8510 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8511 into a relative address of the form:
8512 add rd, pc, #low(label-.-8)"
8513 add rd, rd, #high(label-.-8)" */
8514
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* ADRL expands to two instructions.  */
  inst.size = INSN_SIZE * 2;
  /* Compensate for the PC reading 8 bytes ahead in ARM state.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* When interworking, set the Thumb bit of an address that names a
     defined Thumb function.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
8534
/* Encode a data-processing (arithmetic) instruction: Rd at bit 12, Rn at
   bit 16, plus a shifter operand.  A missing Rn defaults to Rd (two
   operand form).  */
static void
do_arit (void)
{
  /* Thumb-only relocation modifiers are not valid on ARM encodings.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
8547
8548 static void
8549 do_barrier (void)
8550 {
8551 if (inst.operands[0].present)
8552 inst.instruction |= inst.operands[0].imm;
8553 else
8554 inst.instruction |= 0xf;
8555 }
8556
8557 static void
8558 do_bfc (void)
8559 {
8560 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8561 constraint (msb > 32, _("bit-field extends past end of register"));
8562 /* The instruction encoding stores the LSB and MSB,
8563 not the LSB and width. */
8564 inst.instruction |= inst.operands[0].reg << 12;
8565 inst.instruction |= inst.operands[1].imm << 7;
8566 inst.instruction |= (msb - 1) << 16;
8567 }
8568
8569 static void
8570 do_bfi (void)
8571 {
8572 unsigned int msb;
8573
8574 /* #0 in second position is alternative syntax for bfc, which is
8575 the same instruction but with REG_PC in the Rm field. */
8576 if (!inst.operands[1].isreg)
8577 inst.operands[1].reg = REG_PC;
8578
8579 msb = inst.operands[2].imm + inst.operands[3].imm;
8580 constraint (msb > 32, _("bit-field extends past end of register"));
8581 /* The instruction encoding stores the LSB and MSB,
8582 not the LSB and width. */
8583 inst.instruction |= inst.operands[0].reg << 12;
8584 inst.instruction |= inst.operands[1].reg;
8585 inst.instruction |= inst.operands[2].imm << 7;
8586 inst.instruction |= (msb - 1) << 16;
8587 }
8588
8589 static void
8590 do_bfx (void)
8591 {
8592 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8593 _("bit-field extends past end of register"));
8594 inst.instruction |= inst.operands[0].reg << 12;
8595 inst.instruction |= inst.operands[1].reg;
8596 inst.instruction |= inst.operands[2].imm << 7;
8597 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8598 }
8599
8600 /* ARM V5 breakpoint instruction (argument parse)
8601 BKPT <16 bit unsigned immediate>
8602 Instruction is not conditional.
8603 The bit pattern given in insns[] has the COND_ALWAYS condition,
8604 and it is an error if the caller tried to override that. */
8605
8606 static void
8607 do_bkpt (void)
8608 {
8609 /* Top 12 of 16 bits to bits 19:8. */
8610 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8611
8612 /* Bottom 4 of 16 bits to bits 3:0. */
8613 inst.instruction |= inst.operands[0].imm & 0xf;
8614 }
8615
/* Set up the (pc-relative) relocation for a branch instruction.  A
   "(plt)" or "(tlscall)" suffix parsed into the operand overrides
   DEFAULT_RELOC.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      /* TLS calls use a Thumb-specific relocation in Thumb mode.  */
      inst.relocs[0].type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.relocs[0].type = (bfd_reloc_code_real_type) default_reloc;
  inst.relocs[0].pc_rel = 1;
}
8632
/* Encode B{cond}: pick a JUMP relocation for EABI v4+ objects, a plain
   BRANCH relocation otherwise.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8643
/* Encode BL{cond}: for EABI v4+ an unconditional BL gets a CALL
   relocation (which the linker may convert to BLX); a conditional one
   gets a JUMP relocation.  Pre-v4 objects use the plain BRANCH reloc.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8659
8660 /* ARM V5 branch-link-exchange instruction (argument parse)
8661 BLX <target_addr> ie BLX(1)
8662 BLX{<condition>} <Rm> ie BLX(2)
8663 Unfortunately, there are two different opcodes for this mnemonic.
8664 So, the insns[].value is not used, and the code here zaps values
8665 into inst.instruction.
8666 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8667
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8691
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  /* An explicit .object_arch older than v5 also forces the reloc.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch)
      && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* Pre-EABI-v4 objects never carry this relocation.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.relocs[0].type = BFD_RELOC_ARM_V4BX;
}
8716
8717
8718 /* ARM v5TEJ. Jump to Jazelle code. */
8719
8720 static void
8721 do_bxj (void)
8722 {
8723 if (inst.operands[0].reg == REG_PC)
8724 as_tsktsk (_("use of r15 in bxj is not really useful"));
8725
8726 inst.instruction |= inst.operands[0].reg;
8727 }
8728
8729 /* Co-processor data operation:
8730 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8731 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
static void
do_cdp (void)
{
  inst.instruction |= inst.operands[0].reg << 8;   /* Coprocessor number.  */
  inst.instruction |= inst.operands[1].imm << 20;  /* opcode_1.  */
  inst.instruction |= inst.operands[2].reg << 12;  /* CRd.  */
  inst.instruction |= inst.operands[3].reg << 16;  /* CRn.  */
  inst.instruction |= inst.operands[4].reg;	   /* CRm.  */
  inst.instruction |= inst.operands[5].imm << 5;   /* opcode_2.  */
}
8742
/* Encode a comparison: Rn at bit 16 plus a shifter operand (there is no
   destination register).  */
static void
do_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);
}
8749
8750 /* Transfer between coprocessor and ARM registers.
8751 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8752 MRC2
8753 MCR{cond}
8754 MCR2
8755
8756 No special properties. */
8757
/* Description of one coprocessor register whose access is deprecated
   and/or obsolete, identified by its full MRC/MCR coordinates.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opcode_1 field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opcode_2 field.  */
  arm_feature_set deprecated;	/* Architectures where access is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where access is obsolete.  */
  const char *dep_msg;		/* Diagnostic for deprecated access.  */
  const char *obs_msg;		/* Diagnostic for obsolete access.  */
};
8770
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,			/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,			/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,			/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,			/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,			/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in the deprecated-register table above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8798
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn about accesses to coprocessor registers that are deprecated on
     the selected CPU (see the table above).  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  inst.instruction |= inst.operands[0].reg << 8;   /* Coprocessor number.  */
  inst.instruction |= inst.operands[1].imm << 21;  /* opcode_1.  */
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;  /* CRn.  */
  inst.instruction |= inst.operands[4].reg;	   /* CRm.  */
  inst.instruction |= inst.operands[5].imm << 5;   /* opcode_2.  */
}
8848
8849 /* Transfer between coprocessor register and pair of ARM registers.
8850 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8851 MCRR2
8852 MRRC{cond}
8853 MRRC2
8854
8855 Two XScale instructions are special cases of these:
8856
8857 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8858 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8859
8860 Result unpredictable if Rd or Rn is R15. */
8861
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
       /* If Rd == Rn, error that the operation is
	  unpredictable (example MRRC p3,#1,r1,r1,c4).  */
       constraint (Rd == Rn, BAD_OVERLAP);
    }

  inst.instruction |= inst.operands[0].reg << 8;  /* Coprocessor number.  */
  inst.instruction |= inst.operands[1].imm << 4;  /* opcode.  */
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;	  /* CRm.  */
}
8895
8896 static void
8897 do_cpsi (void)
8898 {
8899 inst.instruction |= inst.operands[0].imm << 6;
8900 if (inst.operands[1].present)
8901 {
8902 inst.instruction |= CPSI_MMOD;
8903 inst.instruction |= inst.operands[1].imm;
8904 }
8905 }
8906
8907 static void
8908 do_dbg (void)
8909 {
8910 inst.instruction |= inst.operands[0].imm;
8911 }
8912
8913 static void
8914 do_div (void)
8915 {
8916 unsigned Rd, Rn, Rm;
8917
8918 Rd = inst.operands[0].reg;
8919 Rn = (inst.operands[1].present
8920 ? inst.operands[1].reg : Rd);
8921 Rm = inst.operands[2].reg;
8922
8923 constraint ((Rd == REG_PC), BAD_PC);
8924 constraint ((Rn == REG_PC), BAD_PC);
8925 constraint ((Rm == REG_PC), BAD_PC);
8926
8927 inst.instruction |= Rd << 16;
8928 inst.instruction |= Rn << 0;
8929 inst.instruction |= Rm << 8;
8930 }
8931
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Nothing is emitted for this instruction.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Record the condition-mask (low 4 bits of the opcode) and the
	 base condition for validating the instructions that follow.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8948
8949 /* If there is only one register in the register list,
8950 then return its register number. Otherwise return -1. */
/* If there is only one register in the register list RANGE, return its
   register number (0..15).  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* Guard i < 0 explicitly: for an empty list ffs returns 0, and using
     -1 as a shift count below would be undefined behavior.  */
  if (i < 0 || i > 15 || range != (1 << i))
    return -1;

  return i;
}
8957
/* Encode an ARM LDM/STM (also used for PUSH/POP): base register at bit
   16 and the register-list bitmap in bits 15:0, with UNPREDICTABLE-use
   diagnostics, and conversion of single-register PUSH/POP to the A2
   (LDR/STR) encoding.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A "^" suffix on the register list selects the user-register /
     exception-return forms.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      if (is_push && one_reg == 13 /* SP */)
	/* PR 22483: The A2 encoding cannot be used when
	   pushing the stack pointer as this is UNPREDICTABLE.  */
	return;

      /* Keep only the condition field and rebuild as LDR/STR.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
9013
/* Handler for the explicit LDM/STM mnemonics (as opposed to PUSH/POP,
   which may be rewritten to the single-register A2 encoding).  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
9019
9020 /* ARMv5TE load-consecutive (argument parse)
9021 Mode is like LDRH.
9022
9023 LDRccD R, mode
9024 STRccD R, mode. */
9025
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* r14 is excluded because the pair would then include r15 (PC).  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second transfer register defaults to Rt + 1.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
9061
/* Encode LDREX: Rt at bit 12, base register at bit 16.  Only a plain
   "[Rn]" addressing form (no offset, no writeback) is accepted.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* No fixup is needed: any offset was constrained to zero above.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
9093
/* ARM V6K ldrexd (argument parse).  Loads the even/odd pair
   {Rt, Rt+1}; operand 2 is the base register.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9109
/* In both ARM and thumb state 'ldr pc, #imm' with an immediate
   which is not a multiple of four is UNPREDICTABLE.  */
static void
check_ldr_r15_aligned (void)
{
  /* Only the literal/immediate form is checked; a register offset
     cannot be validated at assembly time.  */
  constraint (!(inst.operands[1].immisreg)
	      && (inst.operands[0].reg == REG_PC
	      && inst.operands[1].reg == REG_PC
	      && (inst.relocs[0].exp.X_add_number & 0x3)),
	      _("ldr to register 15 must be 4-byte aligned"));
}
9121
/* LDR/STR (argument parse).  A non-register second operand is an
   immediate/literal, which may be turned into a MOV or a literal-pool
   load; otherwise encode a normal addressing-mode-2 operand.  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
9132
/* LDRT/STRT (argument parse).  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* Only a zero offset may be silently converted.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
9151
/* Halfword and signed-byte load/store operations.  */

static void
do_ldstv4 (void)
{
  /* These forms use addressing mode 3 and cannot target the PC.  */
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
9164
/* LDRSBT/LDRSHT/LDRHT/STRHT (argument parse); addressing mode 3
   variant of do_ldstt.  */
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* Only a zero offset may be silently converted.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9183
9184 /* Co-processor register load/store.
9185 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9186 static void
9187 do_lstc (void)
9188 {
9189 inst.instruction |= inst.operands[0].reg << 8;
9190 inst.instruction |= inst.operands[1].reg << 12;
9191 encode_arm_cp_address (2, TRUE, TRUE, 0);
9192 }
9193
/* MLA/MLAS/MLS (argument parse).  */
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  /* Rd in bits 16-19, Rm in bits 0-3, Rs in bits 8-11, Rn in
     bits 12-15.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
9208
/* MOV/MVN (argument parse).  */
static void
do_mov (void)
{
  /* The ALU_ABS group relocs are Thumb-1 only.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9218
/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.  */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 22 distinguishes MOVT from MOVW; a :lower16:/:upper16:
     reloc specifier must match the mnemonic used.  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.relocs[0].type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.relocs[0].type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  /* A resolved constant is encoded directly; otherwise the reloc
     machinery fills the immediate in later.  */
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      imm = inst.relocs[0].exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
9240
/* Handle the VFP spellings of MRS.  Returns SUCCESS if the
   instruction was re-dispatched as a VFP mnemonic, FAIL if the caller
   should encode a core MRS instead.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* APSR_nzcv, FPSCR -> FMSTAT, which takes no operands; clear
	 them before re-dispatching.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
9259
9260 static int
9261 do_vfp_nsyn_msr (void)
9262 {
9263 if (inst.operands[0].isvec)
9264 do_vfp_nsyn_opcode ("fmxr");
9265 else
9266 return FAIL;
9267
9268 return SUCCESS;
9269 }
9270
/* VMRS (argument parse): move from a VFP system register to Rt.  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  /* SP is not a valid Rt in Thumb state.  */
  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* MVFR2 is only valid at ARMv8-A.  */
  if (inst.operands[1].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
9299
/* VMSR (argument parse): move from Rt to a VFP system register.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  /* Thumb rejects both SP and PC; ARM state only forbids PC.  */
  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* MVFR2 is only valid for ARMv8-A.  */
  if (inst.operands[0].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
9323
/* MRS (argument parse).  Operand 1 is either a banked register
   (parsed as a register) or a PSR mask immediate.  */
static void
do_mrs (void)
{
  unsigned br;

  /* Give the VFP spellings first refusal.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      br = inst.operands[1].reg;
      /* NOTE(review): this looks like a sanity check on the encoded
	 banked-register value produced by the parser — bit 9 set, or
	 an SPSR-style 0xf0000 field; confirm against the parser.  */
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9352
/* Two possible forms:
      "{C|S}PSR_<field>, Rm",
      "{C|S}PSR_f, #expression".  */

static void
do_msr (void)
{
  /* Give the VFP spelling (FMXR) first refusal.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: set the I bit and let the fixup machinery
	 encode the rotated immediate.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
      inst.relocs[0].pc_rel = 0;
    }
}
9373
/* MUL (argument parse).  The third operand (Rs) is optional and
   defaults to Rd, giving the two-operand form MUL Rd, Rm.  */
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Rd == Rm is only UNPREDICTABLE before ARMv6.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
9389
/* Long Multiply Parser
   UMULL RdLo, RdHi, Rm, Rs
   SMULL RdLo, RdHi, Rm, Rs
   UMLAL RdLo, RdHi, Rm, Rs
   SMLAL RdLo, RdHi, Rm, Rs.  */

static void
do_mull (void)
{
  /* RdLo in bits 12-15, RdHi in bits 16-19, Rm in bits 0-3, Rs in
     bits 8-11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
9414
/* NOP (argument parse).  With an operand, or on v6K and later, emit
   the architectural hint encoding; otherwise leave the legacy
   mov r0, r0 style encoding untouched.  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
9428
9429 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9430 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9431 Condition defaults to COND_ALWAYS.
9432 Error if Rd, Rn or Rm are R15. */
9433
9434 static void
9435 do_pkhbt (void)
9436 {
9437 inst.instruction |= inst.operands[0].reg << 12;
9438 inst.instruction |= inst.operands[1].reg << 16;
9439 inst.instruction |= inst.operands[2].reg;
9440 if (inst.operands[3].present)
9441 encode_arm_shift (3);
9442 }
9443
/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  Note the mask clears the shift and
	 tb fields and the operand order swaps Rn/Rm.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      /* Normal PKHTB: Rd bits 12-15, Rn bits 16-19, Rm bits 0-3,
	 with the shift applied to Rm.  */
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
9466
/* ARMv5TE: Preload-Cache
   MP Extensions: Preload for write

     PLD(W) <addr_mode>

   Syntactically, like LDR with B=1, W=0, L=1.  */

static void
do_pld (void)
{
  /* Preloads take a single pre-indexed address operand with no
     writeback or post-indexing.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9487
/* ARMv7: PLI <addr_mode>  */
static void
do_pli (void)
{
  /* Same addressing-mode restrictions as PLD.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI uses the post-indexed-style encoding: clear the P bit that
     encode_arm_addr_mode_2 set.  */
  inst.instruction &= ~PRE_INDEX;
}
9503
/* PUSH/POP (argument parse): rewrite the single register-list operand
   into the equivalent LDM/STM form with SP! as the base register.  */
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Shift the register list to operand 1 and synthesize operand 0 as
     a written-back SP base.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9516
9517 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9518 word at the specified address and the following word
9519 respectively.
9520 Unconditionally executed.
9521 Error if Rn is R15. */
9522
9523 static void
9524 do_rfe (void)
9525 {
9526 inst.instruction |= inst.operands[0].reg << 16;
9527 if (inst.operands[0].writeback)
9528 inst.instruction |= WRITE_BACK;
9529 }
9530
9531 /* ARM V6 ssat (argument parse). */
9532
9533 static void
9534 do_ssat (void)
9535 {
9536 inst.instruction |= inst.operands[0].reg << 12;
9537 inst.instruction |= (inst.operands[1].imm - 1) << 16;
9538 inst.instruction |= inst.operands[2].reg;
9539
9540 if (inst.operands[3].present)
9541 encode_arm_shift (3);
9542 }
9543
9544 /* ARM V6 usat (argument parse). */
9545
9546 static void
9547 do_usat (void)
9548 {
9549 inst.instruction |= inst.operands[0].reg << 12;
9550 inst.instruction |= inst.operands[1].imm << 16;
9551 inst.instruction |= inst.operands[2].reg;
9552
9553 if (inst.operands[3].present)
9554 encode_arm_shift (3);
9555 }
9556
9557 /* ARM V6 ssat16 (argument parse). */
9558
9559 static void
9560 do_ssat16 (void)
9561 {
9562 inst.instruction |= inst.operands[0].reg << 12;
9563 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9564 inst.instruction |= inst.operands[2].reg;
9565 }
9566
9567 static void
9568 do_usat16 (void)
9569 {
9570 inst.instruction |= inst.operands[0].reg << 12;
9571 inst.instruction |= inst.operands[1].imm << 16;
9572 inst.instruction |= inst.operands[2].reg;
9573 }
9574
/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   preserving the other bits.

   setend <endian_specifier>, where <endian_specifier> is either
   BE or LE.  */

static void
do_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* Bit 9 selects big-endian.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9591
/* Shift mnemonics LSL/LSR/ASR/ROR (argument parse).  The middle
   operand Rm is optional: "lsl Rd, Rs" means "lsl Rd, Rd, Rs".  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate shift amount: leave it to the fixup machinery.  */
    inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
}
9612
/* SMC (argument parse): the immediate is handled via a fixup.  */
static void
do_smc (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SMC;
  inst.relocs[0].pc_rel = 0;
}
9619
/* HVC (argument parse): the immediate is handled via a fixup.  */
static void
do_hvc (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_HVC;
  inst.relocs[0].pc_rel = 0;
}
9626
/* SWI/SVC (argument parse): the immediate is handled via a fixup.  */
static void
do_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
  inst.relocs[0].pc_rel = 0;
}
9633
/* ARMv8.1 SETPAN, ARM encoding (argument parse).  */
static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  /* The PAN value occupies bit 9.  */
  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}
9642
/* ARMv8.1 SETPAN, Thumb encoding (argument parse).  */
static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  /* The PAN value occupies bit 3 in the Thumb encoding.  */
  inst.instruction |= (inst.operands[0].imm << 3);
}
9651
9652 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9653 SMLAxy{cond} Rd,Rm,Rs,Rn
9654 SMLAWy{cond} Rd,Rm,Rs,Rn
9655 Error if any register is R15. */
9656
9657 static void
9658 do_smla (void)
9659 {
9660 inst.instruction |= inst.operands[0].reg << 16;
9661 inst.instruction |= inst.operands[1].reg;
9662 inst.instruction |= inst.operands[2].reg << 8;
9663 inst.instruction |= inst.operands[3].reg << 12;
9664 }
9665
/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
   Error if any register is R15.
   Warning if Rdlo == Rdhi.  */

static void
do_smlal (void)
{
  /* RdLo in bits 12-15, RdHi in bits 16-19, Rm in bits 0-3, Rs in
     bits 8-11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
9682
9683 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9684 SMULxy{cond} Rd,Rm,Rs
9685 Error if any register is R15. */
9686
9687 static void
9688 do_smul (void)
9689 {
9690 inst.instruction |= inst.operands[0].reg << 16;
9691 inst.instruction |= inst.operands[1].reg;
9692 inst.instruction |= inst.operands[2].reg << 8;
9693 }
9694
/* ARM V6 srs (argument parse).  The variable fields in the encoding are
   the same for both ARM and Thumb-2.  */

static void
do_srs (void)
{
  int reg;

  /* The base register operand is optional and defaults to SP; when
     given explicitly it must be r13.  */
  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;
  /* '!' may be attached to either the base register or the mode.  */
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
9716
/* ARM V6 strex (argument parse).  */

static void
do_strex (void)
{
  /* Only [Rn] or [Rn, #0] is a legal address, and Rn may not be PC.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* Rd (the status register) must differ from both Rm and Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
9742
/* Thumb strexb/strexh (argument parse).  */
static void
do_t_strexbh (void)
{
  /* Only a plain [Rn] address is legal.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  /* The status register must differ from both Rt and Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9757
/* ARM V6K strexd (argument parse).  Stores the even/odd pair
   {Rt, Rt+1}; operand 3 is the base register.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status register must differ from both transfer registers and
     the base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
9779
/* ARM V8 STRL.  */
static void
do_stlex (void)
{
  /* The status register must differ from both Rt and Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
9789
/* Thumb encoding of STLEX; field order differs from the ARM form.  */
static void
do_t_stlex (void)
{
  /* The status register must differ from both Rt and Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9798
9799 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9800 extends it to 32-bits, and adds the result to a value in another
9801 register. You can specify a rotation by 0, 8, 16, or 24 bits
9802 before extracting the 16-bit value.
9803 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9804 Condition defaults to COND_ALWAYS.
9805 Error if any register uses R15. */
9806
9807 static void
9808 do_sxtah (void)
9809 {
9810 inst.instruction |= inst.operands[0].reg << 12;
9811 inst.instruction |= inst.operands[1].reg << 16;
9812 inst.instruction |= inst.operands[2].reg;
9813 inst.instruction |= inst.operands[3].imm << 10;
9814 }
9815
9816 /* ARM V6 SXTH.
9817
9818 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9819 Condition defaults to COND_ALWAYS.
9820 Error if any register uses R15. */
9821
9822 static void
9823 do_sxth (void)
9824 {
9825 inst.instruction |= inst.operands[0].reg << 12;
9826 inst.instruction |= inst.operands[1].reg;
9827 inst.instruction |= inst.operands[2].imm << 10;
9828 }
9829 \f
9830 /* VFP instructions. In a logical order: SP variant first, monad
9831 before dyad, arithmetic then move then load/store. */
9832
/* Single-precision monadic operation: Sd, Sm.  */
static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9839
/* Single-precision dyadic operation: Sd, Sn, Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9847
/* Single-precision compare with zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
9853
/* Conversion with double-precision destination: Dd, Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9860
/* Conversion with single-precision destination: Sd, Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9867
/* Core register from single-precision: Rd, Sn.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
9874
/* Two core registers from a consecutive SP register pair:
   Rd, Rn, {Sm, Sm+1}.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9884
/* Single-precision register from core register: Sn, Rd.  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
9891
/* Consecutive SP register pair from two core registers:
   {Sm, Sm+1}, Rd, Rn.  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9901
/* Single-precision load/store: Sd, <address>.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9908
/* Double-precision load/store: Dd, <address>.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9915
9916
/* Common encoder for single-precision load/store multiple; the
   register count is in operand 1's imm field.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  /* Non-writeback is only legal for the IA form.  */
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}
9929
/* Common encoder for double-precision (and FLDMX/FSTMX) load/store
   multiple; the register count is in operand 1's imm field.  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  /* Non-writeback is only legal for the IA forms.  */
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* Each D register is two words; the X forms add one extra word.  */
  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
9950
/* Single-precision load/store multiple, increment-after.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}
9956
/* Single-precision load/store multiple, decrement-before.  */
static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}
9962
/* Double-precision load/store multiple, increment-after.  */
static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}
9968
/* Double-precision load/store multiple, decrement-before.  */
static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}
9974
/* FLDMIAX/FSTMIAX (extended precision form), increment-after.  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
9980
/* FLDMDBX/FSTMDBX (extended precision form), decrement-before.  */
static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9986
/* Double-precision two-operand form: Dd, Dm.  */
static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9993
/* Double-precision two-operand form: Dn, Dd.  */
static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}
10000
/* Double-precision two-operand form: Dd, Dn.  */
static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
10007
/* Double-precision three-operand form: Dd, Dn, Dm.  */
static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}
10015
/* Double-precision single-operand form: only Dd is encoded.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
10021
/* Double-precision three-operand form with reversed field order:
   Dm, Dd, Dn.  */
static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
10029
/* VFPv3 instructions.  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  /* The 8-bit immediate is split: high nibble in bits 16-19, low
     nibble in bits 0-3.  */
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
10038
/* VFPv3 VMOV.F64 Dd, #imm.  */
static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  /* The 8-bit immediate is split: high nibble in bits 16-19, low
     nibble in bits 0-3.  */
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
10046
/* Encode the fraction-bits field for VFPv3 fixed-point conversions.
   SRCSIZE is the width (16 or 32) of the fixed-point operand; the
   encoded field is srcsize minus the parsed fbits immediate.  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  /* Low bit of the field goes in bit 5, the rest in bits 0-3.  */
  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
10070
/* Single-precision conversion with a 16-bit fixed-point operand.  */
static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
10077
/* Double-precision conversion with a 16-bit fixed-point operand.  */
static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
10084
/* Single-precision conversion with a 32-bit fixed-point operand.  */
static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
10091
/* Double-precision conversion with a 32-bit fixed-point operand.  */
static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
10098 \f
10099 /* FPA instructions. Also in a logical order. */
10100
10101 static void
10102 do_fpa_cmp (void)
10103 {
10104 inst.instruction |= inst.operands[0].reg << 16;
10105 inst.instruction |= inst.operands[1].reg;
10106 }
10107
/* FPA LFM/SFM (argument parse).  Operand 1's imm is the register
   count (1-4), encoded in the CP_T_X/CP_T_Y bits.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;	 break;
    case 2: inst.instruction |= CP_T_Y;	 break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register transfer occupies 12 bytes.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.relocs[0].exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.relocs[0].exp.X_add_number = -inst.relocs[0].exp.X_add_number;

      /* Descending-stack writeback is emulated with a post-indexed
	 transfer.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
10146 \f
10147 /* iWMMXt instructions: strictly in alphabetical order. */
10148
/* iWMMXt TANDC/TORC: the destination must be spelled r15.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
10154
/* iWMMXt TEXTRC: Rd in bits 12-15, lane immediate in bits 0-2.  */
static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}
10161
10162 static void
10163 do_iwmmxt_textrm (void)
10164 {
10165 inst.instruction |= inst.operands[0].reg << 12;
10166 inst.instruction |= inst.operands[1].reg << 16;
10167 inst.instruction |= inst.operands[2].imm;
10168 }
10169
10170 static void
10171 do_iwmmxt_tinsr (void)
10172 {
10173 inst.instruction |= inst.operands[0].reg << 16;
10174 inst.instruction |= inst.operands[1].reg << 12;
10175 inst.instruction |= inst.operands[2].imm;
10176 }
10177
10178 static void
10179 do_iwmmxt_tmia (void)
10180 {
10181 inst.instruction |= inst.operands[0].reg << 5;
10182 inst.instruction |= inst.operands[1].reg;
10183 inst.instruction |= inst.operands[2].reg << 12;
10184 }
10185
10186 static void
10187 do_iwmmxt_waligni (void)
10188 {
10189 inst.instruction |= inst.operands[0].reg << 12;
10190 inst.instruction |= inst.operands[1].reg << 16;
10191 inst.instruction |= inst.operands[2].reg;
10192 inst.instruction |= inst.operands[3].imm << 20;
10193 }
10194
10195 static void
10196 do_iwmmxt_wmerge (void)
10197 {
10198 inst.instruction |= inst.operands[0].reg << 12;
10199 inst.instruction |= inst.operands[1].reg << 16;
10200 inst.instruction |= inst.operands[2].reg;
10201 inst.instruction |= inst.operands[3].imm << 21;
10202 }
10203
10204 static void
10205 do_iwmmxt_wmov (void)
10206 {
10207 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10208 inst.instruction |= inst.operands[0].reg << 12;
10209 inst.instruction |= inst.operands[1].reg << 16;
10210 inst.instruction |= inst.operands[1].reg;
10211 }
10212
10213 static void
10214 do_iwmmxt_wldstbh (void)
10215 {
10216 int reloc;
10217 inst.instruction |= inst.operands[0].reg << 12;
10218 if (thumb_mode)
10219 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
10220 else
10221 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
10222 encode_arm_cp_address (1, TRUE, FALSE, reloc);
10223 }
10224
10225 static void
10226 do_iwmmxt_wldstw (void)
10227 {
10228 /* RIWR_RIWC clears .isreg for a control register. */
10229 if (!inst.operands[0].isreg)
10230 {
10231 constraint (inst.cond != COND_ALWAYS, BAD_COND);
10232 inst.instruction |= 0xf0000000;
10233 }
10234
10235 inst.instruction |= inst.operands[0].reg << 12;
10236 encode_arm_cp_address (1, TRUE, TRUE, 0);
10237 }
10238
/* WLDRD/WSTRD doubleword transfer.  iWMMXt2 additionally allows a
   register-offset addressing form, which is encoded by hand here.  */

static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Rebuild the opcode for the register-offset variant: clear the
	 immediate-form fields and force the unconditional (0xf)
	 condition code.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
10261
10262 static void
10263 do_iwmmxt_wshufh (void)
10264 {
10265 inst.instruction |= inst.operands[0].reg << 12;
10266 inst.instruction |= inst.operands[1].reg << 16;
10267 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
10268 inst.instruction |= (inst.operands[2].imm & 0x0f);
10269 }
10270
10271 static void
10272 do_iwmmxt_wzero (void)
10273 {
10274 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10275 inst.instruction |= inst.operands[0].reg;
10276 inst.instruction |= inst.operands[0].reg << 12;
10277 inst.instruction |= inst.operands[0].reg << 16;
10278 }
10279
/* Encode either the three-register form (via do_rd_rn_rm) or, on
   iWMMXt2 only, the two-registers-plus-#imm5 shift form.  */

static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* A #0 shift count is rewritten to an equivalent instruction,
	   chosen by the operation-size field in bits 20-23.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  The 5-bit count is split: bit 4 goes to
       opcode bit 8, bits 0-3 stay in place.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
10329 \f
10330 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10331 operations first, then control, shift, and load/store. */
10332
10333 /* Insns like "foo X,Y,Z". */
10334
10335 static void
10336 do_mav_triple (void)
10337 {
10338 inst.instruction |= inst.operands[0].reg << 16;
10339 inst.instruction |= inst.operands[1].reg;
10340 inst.instruction |= inst.operands[2].reg << 12;
10341 }
10342
10343 /* Insns like "foo W,X,Y,Z".
10344 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10345
10346 static void
10347 do_mav_quad (void)
10348 {
10349 inst.instruction |= inst.operands[0].reg << 5;
10350 inst.instruction |= inst.operands[1].reg << 12;
10351 inst.instruction |= inst.operands[2].reg << 16;
10352 inst.instruction |= inst.operands[3].reg;
10353 }
10354
10355 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10356 static void
10357 do_mav_dspsc (void)
10358 {
10359 inst.instruction |= inst.operands[1].reg << 12;
10360 }
10361
10362 /* Maverick shift immediate instructions.
10363 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10364 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10365
10366 static void
10367 do_mav_shift (void)
10368 {
10369 int imm = inst.operands[2].imm;
10370
10371 inst.instruction |= inst.operands[0].reg << 12;
10372 inst.instruction |= inst.operands[1].reg << 16;
10373
10374 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10375 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10376 Bit 4 should be 0. */
10377 imm = (imm & 0xf) | ((imm & 0x70) << 1);
10378
10379 inst.instruction |= imm;
10380 }
10381 \f
10382 /* XScale instructions. Also sorted arithmetic before move. */
10383
10384 /* Xscale multiply-accumulate (argument parse)
10385 MIAcc acc0,Rm,Rs
10386 MIAPHcc acc0,Rm,Rs
10387 MIAxycc acc0,Rm,Rs. */
10388
10389 static void
10390 do_xsc_mia (void)
10391 {
10392 inst.instruction |= inst.operands[1].reg;
10393 inst.instruction |= inst.operands[2].reg << 12;
10394 }
10395
10396 /* Xscale move-accumulator-register (argument parse)
10397
10398 MARcc acc0,RdLo,RdHi. */
10399
10400 static void
10401 do_xsc_mar (void)
10402 {
10403 inst.instruction |= inst.operands[1].reg << 12;
10404 inst.instruction |= inst.operands[2].reg << 16;
10405 }
10406
10407 /* Xscale move-register-accumulator (argument parse)
10408
10409 MRAcc RdLo,RdHi,acc0. */
10410
10411 static void
10412 do_xsc_mra (void)
10413 {
10414 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
10415 inst.instruction |= inst.operands[0].reg << 12;
10416 inst.instruction |= inst.operands[1].reg << 16;
10417 }
10418 \f
10419 /* Encoding functions relevant only to Thumb. */
10420
10421 /* inst.operands[i] is a shifted-register operand; encode
10422 it into inst.instruction in the format used by Thumb32. */
10423
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* A zero amount is canonicalized to LSL #0; an amount of 32
	 (legal only for LSR/ASR, checked above) is encoded as 0.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      /* The 5-bit amount is split: imm3 into bits 12-14, imm2 into
	 bits 6-7.  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10455
10456
10457 /* inst.operands[i] was set up by parse_address. Encode it into a
10458 Thumb32 format load or store instruction. Reject forms that cannot
10459 be used with such instructions. If is_t is true, reject forms that
10460 cannot be used with a T instruction; if is_d is true, reject forms
10461 that cannot be used with a D instruction. If it is a store insn,
10462 reject PC in Rn. */
10463
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* [Rn, Rm {, LSL #shift}] -- register-offset form.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.relocs[0].exp.X_op != O_constant,
		      _("expression too complex"));
	  /* Only LSL #0-3 is encodable.  */
	  constraint (inst.relocs[0].exp.X_add_number < 0
		      || inst.relocs[0].exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
	}
      inst.relocs[0].type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* [Rn, #imm]{!} -- offset or pre-indexed form.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* [Rn], #imm -- post-indexed form; writeback is implied.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10535
10536 /* Table of Thumb instructions which exist in both 16- and 32-bit
10537 encodings (the latter only in post-V6T2 cores). The index is the
10538 value used in the insns table below. When there is more than one
10539 possible 16-bit encoding for the instruction, this table always
10540 holds variant (1).
10541 Also contains several pseudo-instructions used during relaxation. */
/* Each X(mnemonic, 16-bit opcode, 32-bit opcode) row is expanded
   three times below: once for the T_MNEM_* enum, once for the 16-bit
   opcode table and once for the 32-bit opcode table.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bf,    0000, f040e001),			\
  X(_bfcsel,0000, f000e001),			\
  X(_bfx,   0000, f060e001),			\
  X(_bfl,   0000, f000c001),			\
  X(_bflx,  0000, f070e001),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_dls,   0000, f040e001),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_le,    0000, f00fc001),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_wls,   0000, f040c001),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* 16-bit opcodes, indexed by T_MNEM code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* 32-bit opcodes, indexed the same way; bit 20 is the S (flag-setting)
   bit in the 32-bit encodings below.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10649
10650 /* Thumb instruction encoders, in alphabetical order. */
10651
10652 /* ADDW or SUBW. */
10653
10654 static void
10655 do_t_add_sub_w (void)
10656 {
10657 int Rd, Rn;
10658
10659 Rd = inst.operands[0].reg;
10660 Rn = inst.operands[1].reg;
10661
10662 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10663 is the SP-{plus,minus}-immediate form of the instruction. */
10664 if (Rn == REG_SP)
10665 constraint (Rd == REG_PC, BAD_PC);
10666 else
10667 reject_bad_reg (Rd);
10668
10669 inst.instruction |= (Rn << 16) | (Rd << 8);
10670 inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
10671 }
10672
10673 /* Parse an add or subtract instruction. We get here with inst.instruction
10674 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
10675
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */

  /* A PC destination must be the last instruction in an IT block.  */
  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* Inside an IT block only the non-flag-setting form may be
	 narrow; outside, only the flag-setting form may.  */
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate operand.  */
	  int add;

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* The Thumb-1 group relocations force the 16-bit
		     encoding without relaxation.  */
		  if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || (inst.relocs[0].type
			  > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC))
		    {
		      if (inst.size_req == 2)
			inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
		      else
			inst.relax = opcode;
		    }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* 32-bit encoding required.  */
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return idiom SUBS PC, LR, #const
		     is permitted with a PC destination.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.relocs[0].exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.relocs[0].exp.X_add_number < 0
			      || inst.relocs[0].exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.relocs[0].exp.X_add_number;
		  inst.relocs[0].type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register operand, possibly shifted.  */
	  unsigned int value = inst.relocs[0].exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* ADD is commutative: put the non-destination
			 source in the Rm slot.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (pre-UAL) syntax: 16-bit encodings only.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
10896
/* ADR: load a PC-relative address into Rd.  */

static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.relocs[0].pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
      inst.relocs[0].exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.relocs[0].pc_rel = 1;
      inst.instruction |= Rd << 4;
    }

  /* For the address of a defined Thumb function, set bit 0 so the
     computed value keeps the Thumb state bit.  */
  if (inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number += 1;
}
10936
10937 /* Arithmetic instructions for which there is just one 16-bit
10938 instruction encoding, and it allows only two low registers.
10939 For maximal compatibility with ARM syntax, we allow three register
10940 operands even when Thumb-32 instructions are not available, as long
10941 as the first two are identical. For instance, both "sbc r0,r1" and
10942 "sbc r0,r0,r1" are allowed. */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The 16-bit encoding is two-operand, so the first source
	     must equal the destination.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
11025
11026 /* Similarly, but for instructions where the arithmetic operation is
11027 commutative, so we can allow either of them to be different from
11028 the destination operand in a 16-bit instruction. For instance, all
11029 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11030 accepted. */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* The operation is commutative, so either source may
		 coincide with the destination in the two-operand
		 16-bit form.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
11126
11127 static void
11128 do_t_bfc (void)
11129 {
11130 unsigned Rd;
11131 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
11132 constraint (msb > 32, _("bit-field extends past end of register"));
11133 /* The instruction encoding stores the LSB and MSB,
11134 not the LSB and width. */
11135 Rd = inst.operands[0].reg;
11136 reject_bad_reg (Rd);
11137 inst.instruction |= Rd << 8;
11138 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
11139 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
11140 inst.instruction |= msb - 1;
11141 }
11142
11143 static void
11144 do_t_bfi (void)
11145 {
11146 int Rd, Rn;
11147 unsigned int msb;
11148
11149 Rd = inst.operands[0].reg;
11150 reject_bad_reg (Rd);
11151
11152 /* #0 in second position is alternative syntax for bfc, which is
11153 the same instruction but with REG_PC in the Rm field. */
11154 if (!inst.operands[1].isreg)
11155 Rn = REG_PC;
11156 else
11157 {
11158 Rn = inst.operands[1].reg;
11159 reject_bad_reg (Rn);
11160 }
11161
11162 msb = inst.operands[2].imm + inst.operands[3].imm;
11163 constraint (msb > 32, _("bit-field extends past end of register"));
11164 /* The instruction encoding stores the LSB and MSB,
11165 not the LSB and width. */
11166 inst.instruction |= Rd << 8;
11167 inst.instruction |= Rn << 16;
11168 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11169 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11170 inst.instruction |= msb - 1;
11171 }
11172
11173 static void
11174 do_t_bfx (void)
11175 {
11176 unsigned Rd, Rn;
11177
11178 Rd = inst.operands[0].reg;
11179 Rn = inst.operands[1].reg;
11180
11181 reject_bad_reg (Rd);
11182 reject_bad_reg (Rn);
11183
11184 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
11185 _("bit-field extends past end of register"));
11186 inst.instruction |= Rd << 8;
11187 inst.instruction |= Rn << 16;
11188 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11189 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11190 inst.instruction |= inst.operands[3].imm - 1;
11191 }
11192
11193 /* ARM V5 Thumb BLX (argument parse)
11194 BLX <target_addr> which is BLX(1)
11195 BLX <Rm> which is BLX(2)
11196 Unfortunately, there are two different opcodes for this mnemonic.
11197 So, the insns[].value is not used, and the code here zaps values
11198 into inst.instruction.
11199
11200 ??? How to take advantage of the additional two bits of displacement
11201 available in Thumb32 mode? Need new relocation? */
11202
11203 static void
11204 do_t_blx (void)
11205 {
11206 set_it_insn_type_last ();
11207
11208 if (inst.operands[0].isreg)
11209 {
11210 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
11211 /* We have a register, so this is BLX(2). */
11212 inst.instruction |= inst.operands[0].reg << 3;
11213 }
11214 else
11215 {
11216 /* No register. This must be BLX(1). */
11217 inst.instruction = 0xf000e800;
11218 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
11219 }
11220 }
11221
/* Encode a Thumb B/Bcc branch, selecting between the 16-bit and 32-bit
   encodings and recording the matching PC-relative relocation for
   md_apply_fix to resolve later.  */
static void
do_t_branch (void)
{
  int opcode;
  int cond;
  bfd_reloc_code_real_type reloc;

  cond = inst.cond;
  set_it_insn_type (IF_INSIDE_IT_LAST_INSN);

  if (in_it_block ())
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
	 branches.  */
      cond = COND_ALWAYS;
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  /* Use the 32-bit encoding when it was requested explicitly
     (inst.size_req == 4) or when the target expression suggests the
     16-bit displacement may not suffice.  */
  if (unified_syntax
      && (inst.size_req == 4
	  || (inst.size_req != 2
	      && (inst.operands[0].hasreloc
		  || inst.relocs[0].exp.X_op == O_constant))))
    {
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
		      _("selected architecture does not support "
			"wide conditional branch instruction"));

	  gas_assert (cond != 0xF);
	  /* Condition field of the 32-bit conditional branch.  */
	  inst.instruction |= cond << 22;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }
  inst.relocs[0].type = reloc;
  inst.relocs[0].pc_rel = 1;
}
11283
11284 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11285 between the two is the maximum immediate allowed - which is passed in
11286 RANGE. */
11287 static void
11288 do_t_bkpt_hlt1 (int range)
11289 {
11290 constraint (inst.cond != COND_ALWAYS,
11291 _("instruction is always unconditional"));
11292 if (inst.operands[0].present)
11293 {
11294 constraint (inst.operands[0].imm > range,
11295 _("immediate value out of range"));
11296 inst.instruction |= inst.operands[0].imm;
11297 }
11298
11299 set_it_insn_type (NEUTRAL_IT_INSN);
11300 }
11301
/* Thumb HLT: immediate range is 0-63.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
11307
/* Thumb BKPT: immediate range is 0-255.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11313
/* Thumb BL/BLX with an expression target (BRANCH23 encoding).  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.relocs[0].type == BFD_RELOC_ARM_PLT32)
    inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_symbol
      = find_real_start (inst.relocs[0].exp.X_add_symbol);
#endif
}
11341
/* Thumb BX <Rm>: register goes in bits 3-6.  Must be the last insn of
   an IT block, if inside one.  */
static void
do_t_bx (void)
{
  set_it_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
11351
11352 static void
11353 do_t_bxj (void)
11354 {
11355 int Rm;
11356
11357 set_it_insn_type_last ();
11358 Rm = inst.operands[0].reg;
11359 reject_bad_reg (Rm);
11360 inst.instruction |= Rm << 16;
11361 }
11362
11363 static void
11364 do_t_clz (void)
11365 {
11366 unsigned Rd;
11367 unsigned Rm;
11368
11369 Rd = inst.operands[0].reg;
11370 Rm = inst.operands[1].reg;
11371
11372 reject_bad_reg (Rd);
11373 reject_bad_reg (Rm);
11374
11375 inst.instruction |= Rd << 8;
11376 inst.instruction |= Rm << 16;
11377 inst.instruction |= Rm;
11378 }
11379
/* Thumb CSDB: not permitted inside an IT block; the opcode needs no
   further encoding.  */
static void
do_t_csdb (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
}
11385
/* Thumb CPS: not permitted inside an IT block; the immediate operand is
   OR-ed straight into the opcode.  */
static void
do_t_cps (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
11392
/* Thumb CPSIE/CPSID: picks the 32-bit encoding when the two-operand
   (mode-change) form or a wide encoding is requested and the selected
   architecture permits it; otherwise emits the 16-bit form.  */
static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* The imod (enable/disable) bits are carried over from the parsed
	 16-bit opcode template into the 32-bit encoding.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      /* NOTE(review): gating the 'A' (abort) flag on arm_ext_v1 looks
	 suspicious -- confirm the intended feature bit.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
11420
11421 /* THUMB CPY instruction (argument parse). */
11422
11423 static void
11424 do_t_cpy (void)
11425 {
11426 if (inst.size_req == 4)
11427 {
11428 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11429 inst.instruction |= inst.operands[0].reg << 8;
11430 inst.instruction |= inst.operands[1].reg;
11431 }
11432 else
11433 {
11434 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11435 inst.instruction |= (inst.operands[0].reg & 0x7);
11436 inst.instruction |= inst.operands[1].reg << 3;
11437 }
11438 }
11439
11440 static void
11441 do_t_cbz (void)
11442 {
11443 set_it_insn_type (OUTSIDE_IT_INSN);
11444 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11445 inst.instruction |= inst.operands[0].reg;
11446 inst.relocs[0].pc_rel = 1;
11447 inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11448 }
11449
/* Thumb DBG hint: the option immediate goes into the low bits.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
11455
11456 static void
11457 do_t_div (void)
11458 {
11459 unsigned Rd, Rn, Rm;
11460
11461 Rd = inst.operands[0].reg;
11462 Rn = (inst.operands[1].present
11463 ? inst.operands[1].reg : Rd);
11464 Rm = inst.operands[2].reg;
11465
11466 reject_bad_reg (Rd);
11467 reject_bad_reg (Rn);
11468 reject_bad_reg (Rm);
11469
11470 inst.instruction |= Rd << 8;
11471 inst.instruction |= Rn << 16;
11472 inst.instruction |= Rm;
11473 }
11474
11475 static void
11476 do_t_hint (void)
11477 {
11478 if (unified_syntax && inst.size_req == 4)
11479 inst.instruction = THUMB_OP32 (inst.instruction);
11480 else
11481 inst.instruction = THUMB_OP16 (inst.instruction);
11482 }
11483
11484 static void
11485 do_t_it (void)
11486 {
11487 unsigned int cond = inst.operands[0].imm;
11488
11489 set_it_insn_type (IT_INSN);
11490 now_it.mask = (inst.instruction & 0xf) | 0x10;
11491 now_it.cc = cond;
11492 now_it.warn_deprecated = FALSE;
11493
11494 /* If the condition is a negative condition, invert the mask. */
11495 if ((cond & 0x1) == 0x0)
11496 {
11497 unsigned int mask = inst.instruction & 0x000f;
11498
11499 if ((mask & 0x7) == 0)
11500 {
11501 /* No conversion needed. */
11502 now_it.block_length = 1;
11503 }
11504 else if ((mask & 0x3) == 0)
11505 {
11506 mask ^= 0x8;
11507 now_it.block_length = 2;
11508 }
11509 else if ((mask & 0x1) == 0)
11510 {
11511 mask ^= 0xC;
11512 now_it.block_length = 3;
11513 }
11514 else
11515 {
11516 mask ^= 0xE;
11517 now_it.block_length = 4;
11518 }
11519
11520 inst.instruction &= 0xfff0;
11521 inst.instruction |= mask;
11522 }
11523
11524 inst.instruction |= cond << 4;
11525 }
11526
/* Helper function used for both push/pop and ldm/stm.  DO_IO is false
   for operations that take only a register list (presumably CLRM --
   confirm against callers); BASE is the base register, or -1 when there
   is none; MASK is the register list bitmask; WRITEBACK requests base
   register writeback.  */
static void
encode_thumb2_multi (bfd_boolean do_io, int base, unsigned mask,
		     bfd_boolean writeback)
{
  bfd_boolean load, store;

  /* A real load/store must have a base register.  */
  gas_assert (base != -1 || !do_io);
  /* Bit 20 of the opcode template distinguishes load from store.  */
  load = do_io && ((inst.instruction & (1 << 20)) != 0);
  store = do_io && !load;

  if (mask & (1 << 13))
    inst.error =  _("SP not allowed in register list");

  if (do_io && (mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC is a branch, so it must end any IT block.  */
	    set_it_insn_type_last ();
	}
    }
  else if (store)
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if (do_io && ((mask & (mask - 1)) == 0))
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Re-purpose MASK to carry the single register's number into the
	 Rt field (bits 12-15).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  if (do_io)
    inst.instruction |= base << 16;
}
11594
/* Thumb LDM/STM.  Under unified syntax, tries the 16-bit encodings
   (ldmia/stmia, push/pop, or a single-register ldr/str) before falling
   back to the 32-bit form via encode_thumb2_multi.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  /* Bit for the base register within the register list.  */
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* 16-bit stmia always writes back; 16-bit ldmia writes
		 back exactly when the base is absent from the list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0].reg == REG_SP)
	    {
	      /* SP-based forms become push/pop or SP-relative str/ldr.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
		      THUMB_OP16 (inst.instruction == T_MNEM_stmia
				  ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
		      THUMB_OP16 (inst.instruction == T_MNEM_stmia
				  ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_multi (TRUE /* do_io */, inst.operands[0].reg,
			       inst.operands[1].imm,
			       inst.operands[0].writeback);
	}
    }
  else
    {
      /* Divided (pre-unified) syntax: only 16-bit ldmia/stmia with low
	 registers is expressible.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11723
11724 static void
11725 do_t_ldrex (void)
11726 {
11727 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11728 || inst.operands[1].postind || inst.operands[1].writeback
11729 || inst.operands[1].immisreg || inst.operands[1].shifted
11730 || inst.operands[1].negative,
11731 BAD_ADDR_MODE);
11732
11733 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11734
11735 inst.instruction |= inst.operands[0].reg << 12;
11736 inst.instruction |= inst.operands[1].reg << 16;
11737 inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
11738 }
11739
11740 static void
11741 do_t_ldrexd (void)
11742 {
11743 if (!inst.operands[1].present)
11744 {
11745 constraint (inst.operands[0].reg == REG_LR,
11746 _("r14 not allowed as first register "
11747 "when second register is omitted"));
11748 inst.operands[1].reg = inst.operands[0].reg + 1;
11749 }
11750 constraint (inst.operands[0].reg == inst.operands[1].reg,
11751 BAD_OVERLAP);
11752
11753 inst.instruction |= inst.operands[0].reg << 12;
11754 inst.instruction |= inst.operands[1].reg << 8;
11755 inst.instruction |= inst.operands[2].reg << 16;
11756 }
11757
/* Thumb single-register load/store (LDR/STR and the byte, halfword and
   signed variants).  Tries to pick a 16-bit encoding before falling
   back to the 32-bit Thumb-2 form.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load that writes PC is a branch and must end any IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Immediate/expression operand: may turn into a literal-pool
	     load, in which case we are done.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- and SP-relative accesses use dedicated 16-bit
		     opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.relocs[0].pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* No explicit size: leave the choice to relaxation.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Below here: divided (pre-unified) syntax, 16-bit encodings only.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC- and SP-relative word accesses have dedicated opcodes.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Map the immediate-offset opcode to its register-offset
     counterpart.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
11944
11945 static void
11946 do_t_ldstd (void)
11947 {
11948 if (!inst.operands[1].present)
11949 {
11950 inst.operands[1].reg = inst.operands[0].reg + 1;
11951 constraint (inst.operands[0].reg == REG_LR,
11952 _("r14 not allowed here"));
11953 constraint (inst.operands[0].reg == REG_R12,
11954 _("r12 not allowed here"));
11955 }
11956
11957 if (inst.operands[2].writeback
11958 && (inst.operands[0].reg == inst.operands[2].reg
11959 || inst.operands[1].reg == inst.operands[2].reg))
11960 as_warn (_("base register written back, and overlaps "
11961 "one of transfer registers"));
11962
11963 inst.instruction |= inst.operands[0].reg << 12;
11964 inst.instruction |= inst.operands[1].reg << 8;
11965 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11966 }
11967
/* Thumb unprivileged load/store (LDRT/STRT and variants): Rt goes in
   bits 12-15, the address via the common T32 helper with is_t set.  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
11974
11975 static void
11976 do_t_mla (void)
11977 {
11978 unsigned Rd, Rn, Rm, Ra;
11979
11980 Rd = inst.operands[0].reg;
11981 Rn = inst.operands[1].reg;
11982 Rm = inst.operands[2].reg;
11983 Ra = inst.operands[3].reg;
11984
11985 reject_bad_reg (Rd);
11986 reject_bad_reg (Rn);
11987 reject_bad_reg (Rm);
11988 reject_bad_reg (Ra);
11989
11990 inst.instruction |= Rd << 8;
11991 inst.instruction |= Rn << 16;
11992 inst.instruction |= Rm;
11993 inst.instruction |= Ra << 12;
11994 }
11995
11996 static void
11997 do_t_mlal (void)
11998 {
11999 unsigned RdLo, RdHi, Rn, Rm;
12000
12001 RdLo = inst.operands[0].reg;
12002 RdHi = inst.operands[1].reg;
12003 Rn = inst.operands[2].reg;
12004 Rm = inst.operands[3].reg;
12005
12006 reject_bad_reg (RdLo);
12007 reject_bad_reg (RdHi);
12008 reject_bad_reg (Rn);
12009 reject_bad_reg (Rm);
12010
12011 inst.instruction |= RdLo << 12;
12012 inst.instruction |= RdHi << 8;
12013 inst.instruction |= Rn << 16;
12014 inst.instruction |= Rm;
12015 }
12016
12017 static void
12018 do_t_mov_cmp (void)
12019 {
12020 unsigned Rn, Rm;
12021
12022 Rn = inst.operands[0].reg;
12023 Rm = inst.operands[1].reg;
12024
12025 if (Rn == REG_PC)
12026 set_it_insn_type_last ();
12027
12028 if (unified_syntax)
12029 {
12030 int r0off = (inst.instruction == T_MNEM_mov
12031 || inst.instruction == T_MNEM_movs) ? 8 : 16;
12032 unsigned long opcode;
12033 bfd_boolean narrow;
12034 bfd_boolean low_regs;
12035
12036 low_regs = (Rn <= 7 && Rm <= 7);
12037 opcode = inst.instruction;
12038 if (in_it_block ())
12039 narrow = opcode != T_MNEM_movs;
12040 else
12041 narrow = opcode != T_MNEM_movs || low_regs;
12042 if (inst.size_req == 4
12043 || inst.operands[1].shifted)
12044 narrow = FALSE;
12045
12046 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
12047 if (opcode == T_MNEM_movs && inst.operands[1].isreg
12048 && !inst.operands[1].shifted
12049 && Rn == REG_PC
12050 && Rm == REG_LR)
12051 {
12052 inst.instruction = T2_SUBS_PC_LR;
12053 return;
12054 }
12055
12056 if (opcode == T_MNEM_cmp)
12057 {
12058 constraint (Rn == REG_PC, BAD_PC);
12059 if (narrow)
12060 {
12061 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
12062 but valid. */
12063 warn_deprecated_sp (Rm);
12064 /* R15 was documented as a valid choice for Rm in ARMv6,
12065 but as UNPREDICTABLE in ARMv7. ARM's proprietary
12066 tools reject R15, so we do too. */
12067 constraint (Rm == REG_PC, BAD_PC);
12068 }
12069 else
12070 reject_bad_reg (Rm);
12071 }
12072 else if (opcode == T_MNEM_mov
12073 || opcode == T_MNEM_movs)
12074 {
12075 if (inst.operands[1].isreg)
12076 {
12077 if (opcode == T_MNEM_movs)
12078 {
12079 reject_bad_reg (Rn);
12080 reject_bad_reg (Rm);
12081 }
12082 else if (narrow)
12083 {
12084 /* This is mov.n. */
12085 if ((Rn == REG_SP || Rn == REG_PC)
12086 && (Rm == REG_SP || Rm == REG_PC))
12087 {
12088 as_tsktsk (_("Use of r%u as a source register is "
12089 "deprecated when r%u is the destination "
12090 "register."), Rm, Rn);
12091 }
12092 }
12093 else
12094 {
12095 /* This is mov.w. */
12096 constraint (Rn == REG_PC, BAD_PC);
12097 constraint (Rm == REG_PC, BAD_PC);
12098 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
12099 constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
12100 }
12101 }
12102 else
12103 reject_bad_reg (Rn);
12104 }
12105
12106 if (!inst.operands[1].isreg)
12107 {
12108 /* Immediate operand. */
12109 if (!in_it_block () && opcode == T_MNEM_mov)
12110 narrow = 0;
12111 if (low_regs && narrow)
12112 {
12113 inst.instruction = THUMB_OP16 (opcode);
12114 inst.instruction |= Rn << 8;
12115 if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
12116 || inst.relocs[0].type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
12117 {
12118 if (inst.size_req == 2)
12119 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
12120 else
12121 inst.relax = opcode;
12122 }
12123 }
12124 else
12125 {
12126 constraint ((inst.relocs[0].type
12127 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
12128 && (inst.relocs[0].type
12129 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
12130 THUMB1_RELOC_ONLY);
12131
12132 inst.instruction = THUMB_OP32 (inst.instruction);
12133 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12134 inst.instruction |= Rn << r0off;
12135 inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
12136 }
12137 }
12138 else if (inst.operands[1].shifted && inst.operands[1].immisreg
12139 && (inst.instruction == T_MNEM_mov
12140 || inst.instruction == T_MNEM_movs))
12141 {
12142 /* Register shifts are encoded as separate shift instructions. */
12143 bfd_boolean flags = (inst.instruction == T_MNEM_movs);
12144
12145 if (in_it_block ())
12146 narrow = !flags;
12147 else
12148 narrow = flags;
12149
12150 if (inst.size_req == 4)
12151 narrow = FALSE;
12152
12153 if (!low_regs || inst.operands[1].imm > 7)
12154 narrow = FALSE;
12155
12156 if (Rn != Rm)
12157 narrow = FALSE;
12158
12159 switch (inst.operands[1].shift_kind)
12160 {
12161 case SHIFT_LSL:
12162 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
12163 break;
12164 case SHIFT_ASR:
12165 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
12166 break;
12167 case SHIFT_LSR:
12168 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
12169 break;
12170 case SHIFT_ROR:
12171 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
12172 break;
12173 default:
12174 abort ();
12175 }
12176
12177 inst.instruction = opcode;
12178 if (narrow)
12179 {
12180 inst.instruction |= Rn;
12181 inst.instruction |= inst.operands[1].imm << 3;
12182 }
12183 else
12184 {
12185 if (flags)
12186 inst.instruction |= CONDS_BIT;
12187
12188 inst.instruction |= Rn << 8;
12189 inst.instruction |= Rm << 16;
12190 inst.instruction |= inst.operands[1].imm;
12191 }
12192 }
12193 else if (!narrow)
12194 {
12195 /* Some mov with immediate shift have narrow variants.
12196 Register shifts are handled above. */
12197 if (low_regs && inst.operands[1].shifted
12198 && (inst.instruction == T_MNEM_mov
12199 || inst.instruction == T_MNEM_movs))
12200 {
12201 if (in_it_block ())
12202 narrow = (inst.instruction == T_MNEM_mov);
12203 else
12204 narrow = (inst.instruction == T_MNEM_movs);
12205 }
12206
12207 if (narrow)
12208 {
12209 switch (inst.operands[1].shift_kind)
12210 {
12211 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
12212 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
12213 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
12214 default: narrow = FALSE; break;
12215 }
12216 }
12217
12218 if (narrow)
12219 {
12220 inst.instruction |= Rn;
12221 inst.instruction |= Rm << 3;
12222 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
12223 }
12224 else
12225 {
12226 inst.instruction = THUMB_OP32 (inst.instruction);
12227 inst.instruction |= Rn << r0off;
12228 encode_thumb32_shifted_operand (1);
12229 }
12230 }
12231 else
12232 switch (inst.instruction)
12233 {
12234 case T_MNEM_mov:
12235 /* In v4t or v5t a move of two lowregs produces unpredictable
12236 results. Don't allow this. */
12237 if (low_regs)
12238 {
12239 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
12240 "MOV Rd, Rs with two low registers is not "
12241 "permitted on this architecture");
12242 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12243 arm_ext_v6);
12244 }
12245
12246 inst.instruction = T_OPCODE_MOV_HR;
12247 inst.instruction |= (Rn & 0x8) << 4;
12248 inst.instruction |= (Rn & 0x7);
12249 inst.instruction |= Rm << 3;
12250 break;
12251
12252 case T_MNEM_movs:
12253 /* We know we have low registers at this point.
12254 Generate LSLS Rd, Rs, #0. */
12255 inst.instruction = T_OPCODE_LSL_I;
12256 inst.instruction |= Rn;
12257 inst.instruction |= Rm << 3;
12258 break;
12259
12260 case T_MNEM_cmp:
12261 if (low_regs)
12262 {
12263 inst.instruction = T_OPCODE_CMP_LR;
12264 inst.instruction |= Rn;
12265 inst.instruction |= Rm << 3;
12266 }
12267 else
12268 {
12269 inst.instruction = T_OPCODE_CMP_HR;
12270 inst.instruction |= (Rn & 0x8) << 4;
12271 inst.instruction |= (Rn & 0x7);
12272 inst.instruction |= Rm << 3;
12273 }
12274 break;
12275 }
12276 return;
12277 }
12278
12279 inst.instruction = THUMB_OP16 (inst.instruction);
12280
12281 /* PR 10443: Do not silently ignore shifted operands. */
12282 constraint (inst.operands[1].shifted,
12283 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12284
12285 if (inst.operands[1].isreg)
12286 {
12287 if (Rn < 8 && Rm < 8)
12288 {
12289 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12290 since a MOV instruction produces unpredictable results. */
12291 if (inst.instruction == T_OPCODE_MOV_I8)
12292 inst.instruction = T_OPCODE_ADD_I3;
12293 else
12294 inst.instruction = T_OPCODE_CMP_LR;
12295
12296 inst.instruction |= Rn;
12297 inst.instruction |= Rm << 3;
12298 }
12299 else
12300 {
12301 if (inst.instruction == T_OPCODE_MOV_I8)
12302 inst.instruction = T_OPCODE_MOV_HR;
12303 else
12304 inst.instruction = T_OPCODE_CMP_HR;
12305 do_t_cpy ();
12306 }
12307 }
12308 else
12309 {
12310 constraint (Rn > 7,
12311 _("only lo regs allowed with immediate"));
12312 inst.instruction |= Rn << 8;
12313 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
12314 }
12315 }
12316
/* Encode Thumb MOVW/MOVT (16-bit immediate move).  Bit 23 of the base
   opcode distinguishes MOVT (top half) from MOVW.  */
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  top = (inst.instruction & 0x00800000) != 0;
  if (inst.relocs[0].type == BFD_RELOC_ARM_MOVW)
    {
      /* A :lower16: operator is only meaningful on MOVW.  */
      constraint (top, _(":lower16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.relocs[0].type == BFD_RELOC_ARM_MOVT)
    {
      /* A :upper16: operator is only meaningful on MOVT.  */
      constraint (!top, _(":upper16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      /* Constant known now: scatter the 16-bit immediate into the
	 T32 imm4:i:imm3:imm8 fields; otherwise the fixup does it.  */
      imm = inst.relocs[0].exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
12349
/* Encode Thumb MVN/MVNS/TST/CMP/CMN with a register or immediate second
   operand, choosing between 16-bit and 32-bit encodings based on syntax
   mode, register range, shifts, and IT-block state.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN accept SP in the first operand but never PC; the other
     mnemonics reject both SP and PC.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* MVN/MVNS place the destination at bit 8; the comparison
	 instructions place their first operand in the Rn field.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Non-unified syntax: only the unshifted low-register 16-bit
	 forms are available.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12429
/* Encode a Thumb-2 MRS instruction.  Operand 1 is either a banked
   register (isreg) or a special/PSR mask immediate.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  /* Let the VFP/Neon-syntax handler claim FPSCR-style forms first.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register: either bit 9 (banked-valid) must be set or
	 bits 15:12 must be 0xf.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
		      "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12477
/* Encode a Thumb-2 MSR instruction (core register to special/banked
   register).  Immediate source forms are not available in Thumb.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* Let the VFP/Neon-syntax handler claim FPSCR-style forms first.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* Operand 0 is either a banked register or a PSR mask specifier.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
		"requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* Scatter the mask/banked-register specifier across the opcode.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12524
/* Encode Thumb MUL/MULS.  The 16-bit encoding requires the destination
   to overlap one of the source registers.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* Two-operand form: MUL Rd, Rm is MUL Rd, Rd, Rm.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      /* 16-bit form needs low registers, Rd overlapping a source,
	 no explicit ".w", and flags consistent with the IT state.  */
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
12587
12588 static void
12589 do_t_mull (void)
12590 {
12591 unsigned RdLo, RdHi, Rn, Rm;
12592
12593 RdLo = inst.operands[0].reg;
12594 RdHi = inst.operands[1].reg;
12595 Rn = inst.operands[2].reg;
12596 Rm = inst.operands[3].reg;
12597
12598 reject_bad_reg (RdLo);
12599 reject_bad_reg (RdHi);
12600 reject_bad_reg (Rn);
12601 reject_bad_reg (Rm);
12602
12603 inst.instruction |= RdLo << 12;
12604 inst.instruction |= RdHi << 8;
12605 inst.instruction |= Rn << 16;
12606 inst.instruction |= Rm;
12607
12608 if (RdLo == RdHi)
12609 as_tsktsk (_("rdhi and rdlo must be different"));
12610 }
12611
/* Encode NOP and the other hint instructions (operand 0 is the
   optional hint number).  */
static void
do_t_nop (void)
{
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  /* Hints above 15 only exist in the 32-bit encoding.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    /* Fall back to the pre-Thumb-2 16-bit encoding 0x46c0.  */
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
12644
12645 static void
12646 do_t_neg (void)
12647 {
12648 if (unified_syntax)
12649 {
12650 bfd_boolean narrow;
12651
12652 if (THUMB_SETS_FLAGS (inst.instruction))
12653 narrow = !in_it_block ();
12654 else
12655 narrow = in_it_block ();
12656 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12657 narrow = FALSE;
12658 if (inst.size_req == 4)
12659 narrow = FALSE;
12660
12661 if (!narrow)
12662 {
12663 inst.instruction = THUMB_OP32 (inst.instruction);
12664 inst.instruction |= inst.operands[0].reg << 8;
12665 inst.instruction |= inst.operands[1].reg << 16;
12666 }
12667 else
12668 {
12669 inst.instruction = THUMB_OP16 (inst.instruction);
12670 inst.instruction |= inst.operands[0].reg;
12671 inst.instruction |= inst.operands[1].reg << 3;
12672 }
12673 }
12674 else
12675 {
12676 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
12677 BAD_HIREG);
12678 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12679
12680 inst.instruction = THUMB_OP16 (inst.instruction);
12681 inst.instruction |= inst.operands[0].reg;
12682 inst.instruction |= inst.operands[1].reg << 3;
12683 }
12684 }
12685
12686 static void
12687 do_t_orn (void)
12688 {
12689 unsigned Rd, Rn;
12690
12691 Rd = inst.operands[0].reg;
12692 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
12693
12694 reject_bad_reg (Rd);
12695 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12696 reject_bad_reg (Rn);
12697
12698 inst.instruction |= Rd << 8;
12699 inst.instruction |= Rn << 16;
12700
12701 if (!inst.operands[2].isreg)
12702 {
12703 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12704 inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
12705 }
12706 else
12707 {
12708 unsigned Rm;
12709
12710 Rm = inst.operands[2].reg;
12711 reject_bad_reg (Rm);
12712
12713 constraint (inst.operands[2].shifted
12714 && inst.operands[2].immisreg,
12715 _("shift must be constant"));
12716 encode_thumb32_shifted_operand (2);
12717 }
12718 }
12719
12720 static void
12721 do_t_pkhbt (void)
12722 {
12723 unsigned Rd, Rn, Rm;
12724
12725 Rd = inst.operands[0].reg;
12726 Rn = inst.operands[1].reg;
12727 Rm = inst.operands[2].reg;
12728
12729 reject_bad_reg (Rd);
12730 reject_bad_reg (Rn);
12731 reject_bad_reg (Rm);
12732
12733 inst.instruction |= Rd << 8;
12734 inst.instruction |= Rn << 16;
12735 inst.instruction |= Rm;
12736 if (inst.operands[3].present)
12737 {
12738 unsigned int val = inst.relocs[0].exp.X_add_number;
12739 constraint (inst.relocs[0].exp.X_op != O_constant,
12740 _("expression too complex"));
12741 inst.instruction |= (val & 0x1c) << 10;
12742 inst.instruction |= (val & 0x03) << 6;
12743 }
12744 }
12745
12746 static void
12747 do_t_pkhtb (void)
12748 {
12749 if (!inst.operands[3].present)
12750 {
12751 unsigned Rtmp;
12752
12753 inst.instruction &= ~0x00000020;
12754
12755 /* PR 10168. Swap the Rm and Rn registers. */
12756 Rtmp = inst.operands[1].reg;
12757 inst.operands[1].reg = inst.operands[2].reg;
12758 inst.operands[2].reg = Rtmp;
12759 }
12760 do_t_pkhbt ();
12761 }
12762
/* Encode Thumb-2 PLD/PLI-style preload.  A register offset must not be
   SP or PC; the rest is standard T32 address-mode encoding.  */
static void
do_t_pld (void)
{
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
12771
/* Encode PUSH/POP, preferring the 16-bit encodings when the register
   list allows, falling back to the 32-bit multi-register forms.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    /* Only low registers: plain 16-bit PUSH/POP.  */
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus LR (push) or PC (pop): still 16-bit.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else needs the 32-bit LDM/STM-style encoding;
	 base register is SP (13) with writeback.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_multi (TRUE /* do_io */, 13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12804
12805 static void
12806 do_t_clrm (void)
12807 {
12808 if (unified_syntax)
12809 encode_thumb2_multi (FALSE /* do_io */, -1, inst.operands[0].imm, FALSE);
12810 else
12811 {
12812 inst.error = _("invalid register list to push/pop instruction");
12813 return;
12814 }
12815 }
12816
12817 static void
12818 do_t_rbit (void)
12819 {
12820 unsigned Rd, Rm;
12821
12822 Rd = inst.operands[0].reg;
12823 Rm = inst.operands[1].reg;
12824
12825 reject_bad_reg (Rd);
12826 reject_bad_reg (Rm);
12827
12828 inst.instruction |= Rd << 8;
12829 inst.instruction |= Rm << 16;
12830 inst.instruction |= Rm;
12831 }
12832
12833 static void
12834 do_t_rev (void)
12835 {
12836 unsigned Rd, Rm;
12837
12838 Rd = inst.operands[0].reg;
12839 Rm = inst.operands[1].reg;
12840
12841 reject_bad_reg (Rd);
12842 reject_bad_reg (Rm);
12843
12844 if (Rd <= 7 && Rm <= 7
12845 && inst.size_req != 4)
12846 {
12847 inst.instruction = THUMB_OP16 (inst.instruction);
12848 inst.instruction |= Rd;
12849 inst.instruction |= Rm << 3;
12850 }
12851 else if (unified_syntax)
12852 {
12853 inst.instruction = THUMB_OP32 (inst.instruction);
12854 inst.instruction |= Rd << 8;
12855 inst.instruction |= Rm << 16;
12856 inst.instruction |= Rm;
12857 }
12858 else
12859 inst.error = BAD_HIREG;
12860 }
12861
12862 static void
12863 do_t_rrx (void)
12864 {
12865 unsigned Rd, Rm;
12866
12867 Rd = inst.operands[0].reg;
12868 Rm = inst.operands[1].reg;
12869
12870 reject_bad_reg (Rd);
12871 reject_bad_reg (Rm);
12872
12873 inst.instruction |= Rd << 8;
12874 inst.instruction |= Rm;
12875 }
12876
/* Encode RSB/RSBS.  RSBS Rd, Rn, #0 with low registers can use the
   16-bit NEG encoding.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit of the T32 encoding.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* Only an immediate of exactly zero can use the NEG form.  */
      if (inst.relocs[0].exp.X_op != O_constant
	  || inst.relocs[0].exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.relocs[0].type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* Switch to the T32 modified-immediate layout; the fixup
	     encodes the constant.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12931
12932 static void
12933 do_t_setend (void)
12934 {
12935 if (warn_on_deprecated
12936 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
12937 as_tsktsk (_("setend use is deprecated for ARMv8"));
12938
12939 set_it_insn_type (OUTSIDE_IT_INSN);
12940 if (inst.operands[0].imm)
12941 inst.instruction |= 0x8;
12942 }
12943
/* Encode the Thumb shift instructions ASR/LSL/LSR/ROR in both their
   immediate and register-shift forms, choosing between 16- and 32-bit
   encodings.  */
static void
do_t_shift (void)
{
  /* Two-operand form: the destination doubles as the first source.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit ROR with immediate.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* 16-bit register shifts require Rd == Rn and a low shift reg.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Immediate-shift forms are encoded as MOV(S) Rd, Rm,
		 <shift> #imm.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.relocs[0].type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Non-unified syntax: only the 16-bit low-register forms.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
13091
13092 static void
13093 do_t_simd (void)
13094 {
13095 unsigned Rd, Rn, Rm;
13096
13097 Rd = inst.operands[0].reg;
13098 Rn = inst.operands[1].reg;
13099 Rm = inst.operands[2].reg;
13100
13101 reject_bad_reg (Rd);
13102 reject_bad_reg (Rn);
13103 reject_bad_reg (Rm);
13104
13105 inst.instruction |= Rd << 8;
13106 inst.instruction |= Rn << 16;
13107 inst.instruction |= Rm;
13108 }
13109
13110 static void
13111 do_t_simd2 (void)
13112 {
13113 unsigned Rd, Rn, Rm;
13114
13115 Rd = inst.operands[0].reg;
13116 Rm = inst.operands[1].reg;
13117 Rn = inst.operands[2].reg;
13118
13119 reject_bad_reg (Rd);
13120 reject_bad_reg (Rn);
13121 reject_bad_reg (Rm);
13122
13123 inst.instruction |= Rd << 8;
13124 inst.instruction |= Rn << 16;
13125 inst.instruction |= Rm;
13126 }
13127
/* Encode Thumb-2 SMC (secure monitor call).  The 16-bit immediate is
   scattered across the opcode; requires v7-A.  */
static void
do_t_smc (void)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  /* The immediate is fully encoded here; no fixup remains.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_it_insn_type_last ();
}
13143
13144 static void
13145 do_t_hvc (void)
13146 {
13147 unsigned int value = inst.relocs[0].exp.X_add_number;
13148
13149 inst.relocs[0].type = BFD_RELOC_UNUSED;
13150 inst.instruction |= (value & 0x0fff);
13151 inst.instruction |= (value & 0xf000) << 4;
13152 }
13153
/* Common encoder for Thumb-2 SSAT/USAT.  BIAS is subtracted from the
   saturation-position operand (1 for SSAT, 0 for USAT).  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  /* Optional LSL/ASR shift applied to Rn before saturating.  */
  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.relocs[0].exp.X_add_number;

      inst.relocs[0].type = BFD_RELOC_UNUSED;

      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Split the shift amount across the imm3:imm2 fields.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
13191
/* SSAT: the saturation position is 1-based, so bias the immediate by 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
13197
13198 static void
13199 do_t_ssat16 (void)
13200 {
13201 unsigned Rd, Rn;
13202
13203 Rd = inst.operands[0].reg;
13204 Rn = inst.operands[2].reg;
13205
13206 reject_bad_reg (Rd);
13207 reject_bad_reg (Rn);
13208
13209 inst.instruction |= Rd << 8;
13210 inst.instruction |= inst.operands[1].imm - 1;
13211 inst.instruction |= Rn << 16;
13212 }
13213
13214 static void
13215 do_t_strex (void)
13216 {
13217 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
13218 || inst.operands[2].postind || inst.operands[2].writeback
13219 || inst.operands[2].immisreg || inst.operands[2].shifted
13220 || inst.operands[2].negative,
13221 BAD_ADDR_MODE);
13222
13223 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
13224
13225 inst.instruction |= inst.operands[0].reg << 8;
13226 inst.instruction |= inst.operands[1].reg << 12;
13227 inst.instruction |= inst.operands[2].reg << 16;
13228 inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
13229 }
13230
13231 static void
13232 do_t_strexd (void)
13233 {
13234 if (!inst.operands[2].present)
13235 inst.operands[2].reg = inst.operands[1].reg + 1;
13236
13237 constraint (inst.operands[0].reg == inst.operands[1].reg
13238 || inst.operands[0].reg == inst.operands[2].reg
13239 || inst.operands[0].reg == inst.operands[3].reg,
13240 BAD_OVERLAP);
13241
13242 inst.instruction |= inst.operands[0].reg;
13243 inst.instruction |= inst.operands[1].reg << 12;
13244 inst.instruction |= inst.operands[2].reg << 8;
13245 inst.instruction |= inst.operands[3].reg << 16;
13246 }
13247
13248 static void
13249 do_t_sxtah (void)
13250 {
13251 unsigned Rd, Rn, Rm;
13252
13253 Rd = inst.operands[0].reg;
13254 Rn = inst.operands[1].reg;
13255 Rm = inst.operands[2].reg;
13256
13257 reject_bad_reg (Rd);
13258 reject_bad_reg (Rn);
13259 reject_bad_reg (Rm);
13260
13261 inst.instruction |= Rd << 8;
13262 inst.instruction |= Rn << 16;
13263 inst.instruction |= Rm;
13264 inst.instruction |= inst.operands[3].imm << 4;
13265 }
13266
/* Encode SXTH-family extends.  The 16-bit encodings exist only for low
   registers with no rotation.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* The rotation amount occupies bits 5:4.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
13302
/* SVC/SWI: the 8-bit comment field is filled in by the fixup.  */
static void
do_t_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
}
13308
/* Encode TBB/TBH (table branch).  Bit 4 of the base opcode
   distinguishes the halfword form (TBH) from the byte form (TBB).  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  /* Table branches must be the last instruction in an IT block.  */
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* Before ARMv8 a table base of SP is not allowed.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only TBH carries the LSL #1 index shift.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13331
13332 static void
13333 do_t_udf (void)
13334 {
13335 if (!inst.operands[0].present)
13336 inst.operands[0].imm = 0;
13337
13338 if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
13339 {
13340 constraint (inst.size_req == 2,
13341 _("immediate value out of range"));
13342 inst.instruction = THUMB_OP32 (inst.instruction);
13343 inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
13344 inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
13345 }
13346 else
13347 {
13348 inst.instruction = THUMB_OP16 (inst.instruction);
13349 inst.instruction |= inst.operands[0].imm;
13350 }
13351
13352 set_it_insn_type (NEUTRAL_IT_INSN);
13353 }
13354
13355
/* USAT: the saturation position is 0-based, so no bias is needed.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13361
13362 static void
13363 do_t_usat16 (void)
13364 {
13365 unsigned Rd, Rn;
13366
13367 Rd = inst.operands[0].reg;
13368 Rn = inst.operands[2].reg;
13369
13370 reject_bad_reg (Rd);
13371 reject_bad_reg (Rn);
13372
13373 inst.instruction |= Rd << 8;
13374 inst.instruction |= inst.operands[1].imm;
13375 inst.instruction |= Rn << 16;
13376 }
13377
13378 /* Checking the range of the branch offset (VAL) with NBITS bits
13379 and IS_SIGNED signedness. Also checks the LSB to be 0. */
13380 static int
13381 v8_1_branch_value_check (int val, int nbits, int is_signed)
13382 {
13383 gas_assert (nbits > 0 && nbits <= 32);
13384 if (is_signed)
13385 {
13386 int cmp = (1 << (nbits - 1));
13387 if ((val < -cmp) || (val >= cmp) || (val & 0x01))
13388 return FAIL;
13389 }
13390 else
13391 {
13392 if ((val <= 0) || (val >= (1 << nbits)) || (val & 0x1))
13393 return FAIL;
13394 }
13395 return SUCCESS;
13396 }
13397
/* For branches in Armv8.1-M Mainline: encodes the branch-future
   instructions BF, BFL, BFCSEL, BFX and BFLX.  Operand 0 is always the
   branch-future label; the remaining operands depend on the mnemonic
   (held in inst.instruction on entry).  */
static void
do_t_branch_future (void)
{
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);
  if (inst.operands[0].hasreloc == 0)
    {
      /* Branch-future label known now: 5-bit unsigned, even; encode
	 bits [4:1] of the offset at instruction bits [26:23].  */
      if (v8_1_branch_value_check (inst.operands[0].imm, 5, FALSE) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      inst.instruction |= ((inst.operands[0].imm & 0x1f) >> 1) << 23;
    }
  else
    {
      /* Label unresolved: defer to the fixup/relocation machinery.  */
      inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH5;
      inst.relocs[0].pc_rel = 1;
    }

  switch (insn)
    {
    case T_MNEM_bf:
      if (inst.operands[1].hasreloc == 0)
	{
	  /* 17-bit signed branch offset, split into immA/immB/immC
	     fields of the encoding.  */
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 17, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  int immA = (val & 0x0001f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF17;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfl:
      if (inst.operands[1].hasreloc == 0)
	{
	  /* Same field layout as BF but with a 19-bit signed offset
	     (immA is two bits wider).  */
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 19, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  int immA = (val & 0x0007f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF19;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfcsel:
      /* Operand 1.  */
      if (inst.operands[1].hasreloc == 0)
	{
	  /* Note: no range check here, unlike BF/BFL.  */
	  int val = inst.operands[1].imm;
	  int immA = (val & 0x00001000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF13;
	  inst.relocs[1].pc_rel = 1;
	}

      /* Operand 2.  */
      if (inst.operands[2].hasreloc == 0)
	{
	  /* Operands 0 and 2 must either both be resolved or both be
	     relocated; mixing is rejected here and below.  */
	  constraint ((inst.operands[0].hasreloc != 0), BAD_ARGS);
	  int val2 = inst.operands[2].imm;
	  int val0 = inst.operands[0].imm & 0x1f;
	  int diff = val2 - val0;
	  if (diff == 4)
	    inst.instruction |= 1 << 17; /* T bit.  */
	  else if (diff != 2)
	    as_bad (_("out of range label-relative fixup value"));
	}
      else
	{
	  constraint ((inst.operands[0].hasreloc == 0), BAD_ARGS);
	  inst.relocs[2].type = BFD_RELOC_THUMB_PCREL_BFCSEL;
	  inst.relocs[2].pc_rel = 1;
	}

      /* Operand 3: the condition, encoded at bits [21:18]; the
	 instruction itself must be unconditional.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= (inst.operands[3].imm & 0xf) << 18;
      break;

    case T_MNEM_bfx:
    case T_MNEM_bflx:
      /* Register-indirect forms: Rn at bits [19:16].  */
      inst.instruction |= inst.operands[1].reg << 16;
      break;

    default: abort ();
    }
}
13506
13507 /* Helper function for do_t_loloop to handle relocations. */
13508 static void
13509 v8_1_loop_reloc (int is_le)
13510 {
13511 if (inst.relocs[0].exp.X_op == O_constant)
13512 {
13513 int value = inst.relocs[0].exp.X_add_number;
13514 value = (is_le) ? -value : value;
13515
13516 if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
13517 as_bad (BAD_BRANCH_OFF);
13518
13519 int imml, immh;
13520
13521 immh = (value & 0x00000ffc) >> 2;
13522 imml = (value & 0x00000002) >> 1;
13523
13524 inst.instruction |= (imml << 11) | (immh << 1);
13525 }
13526 else
13527 {
13528 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_LOOP12;
13529 inst.relocs[0].pc_rel = 1;
13530 }
13531 }
13532
/* To handle the Scalar Low Overhead Loop instructions
   in Armv8.1-M Mainline: LE, WLS and DLS.  */
static void
do_t_loloop (void)
{
  unsigned long insn = inst.instruction;

  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction = THUMB_OP32 (inst.instruction);

  switch (insn)
    {
    case T_MNEM_le:
      /* le <label>.  Plain form with no register operand sets bit 21.  */
      if (!inst.operands[0].present)
	inst.instruction |= 1 << 21;

      /* LE encodes a negated (backwards) offset.  */
      v8_1_loop_reloc (TRUE);
      break;

    case T_MNEM_wls:
      /* WLS additionally carries a loop-end label offset.  */
      v8_1_loop_reloc (FALSE);
      /* Fall through.  */
    case T_MNEM_dls:
      /* WLS and DLS both take a core-register operand, encoded at
	 bits [19:16].  */
      constraint (inst.operands[1].isreg != 1, BAD_ARGS);
      inst.instruction |= (inst.operands[1].reg << 16);
      break;

    default: abort();
    }
}
13564
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  All bits set, so it can
   never collide with a real base encoding.  */
#define N_INV -1u

/* One row of NEON_ENC_TAB: the up-to-three alternative base encodings
   an overloaded Neon mnemonic can map to.  N_INV marks a form that
   does not exist for that mnemonic.  */
struct neon_tab_entry
{
  unsigned integer;		/* Integer (or interleave/arm-reg) form.  */
  unsigned float_or_poly;	/* Float or polynomial (or lane) form.  */
  unsigned scalar_or_imm;	/* Scalar or immediate (or dup) form.  */
};
13578
/* Map overloaded Neon opcodes to their respective encodings.  Each X()
   entry supplies the three columns of struct neon_tab_entry (integer,
   float-or-poly, scalar-or-imm base encodings); N_INV marks a form
   that does not exist.  The table is expanded twice below: once to
   build enum neon_opc and once to build neon_enc_tab.  */
#define NEON_ENC_TAB							\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)
13656
/* Expand NEON_ENC_TAB into N_MNEM_* enumerators, one per row; these
   are the values stored for overloaded Neon mnemonics and used to
   index neon_enc_tab below.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
  NEON_ENC_TAB
#undef X
};

/* The encoding table itself, indexed by enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
  NEON_ENC_TAB
#undef X
};
13670
/* Do not use these macros; instead, use NEON_ENCODE defined below.
   The low 28 bits of X hold the table index (an N_MNEM_* value);
   NEON_ENC_SINGLE_ / NEON_ENC_DOUBLE_ additionally preserve the top
   nibble of X, and NEON_ENC_FPV8_ bits 24-27.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction (an N_MNEM_* index) with the selected base
   encoding and mark the instruction as Neon.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)
13695
/* Diagnose a Neon type suffix (inst.vectype populated) on an
   instruction that never set inst.is_neon.  Expands to a statement
   that may execute "return" in the caller, hence a macro rather than
   a function.  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
13706
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     H - VFP S<n> register accessed as half-precision (16 bits)
     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(4, (D, D, S, I), DOUBLE),		\
  X(4, (Q, Q, S, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE),			\
/* Half float shape supported so far.  */\
  X (2, (H, D), MIXED),			\
  X (2, (D, H), MIXED),			\
  X (2, (H, F), MIXED),			\
  X (2, (F, H), MIXED),			\
  X (2, (H, H), HALF),			\
  X (2, (H, R), HALF),			\
  X (2, (R, H), HALF),			\
  X (2, (H, I), HALF),			\
  X (3, (H, H, H), HALF),		\
  X (3, (H, F, I), MIXED),		\
  X (3, (F, H, I), MIXED),		\
  X (3, (D, H, H), MIXED),		\
  X (3, (D, H, S), MIXED)
13781
/* Build the NS_* shape enumerators (e.g. NS_DDD, NS_QQI) by pasting
   the operand letters of each NEON_SHAPE_DEF entry together.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
13798
/* Broad classification of a shape: which register class dominates it
   (see the third column of NEON_SHAPE_DEF).  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

/* Classification of every shape, indexed by enum neon_shape.  */
#define X(N, L, C) SC_##C

static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
13816
/* The kind of a single operand position within a shape; one
   enumerator per mnemonic character of NEON_SHAPE_DEF.  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  Indexed by enum neon_shape_el.  */
static unsigned neon_shape_el_size[] =
{
  16,	/* SE_H.  */
  32,	/* SE_F.  */
  64,	/* SE_D.  */
  128,	/* SE_Q.  */
  0,	/* SE_I: immediate, no register width.  */
  32,	/* SE_S.  */
  32,	/* SE_R.  */
  0	/* SE_L: register list, no single width.  */
};
13841
/* Expanded form of one shape: its operand count and the element kind
   at each operand position.  */
struct neon_shape_info
{
  unsigned els;				/* Number of operands.  */
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];	/* Kind of each operand.  */
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Per-shape operand information, indexed by enum neon_shape; drives
   neon_select_shape and neon_check_type.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
13863
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  Note that the modifier bits N_DBL .. N_SIZ reuse
   the values of the low type bits: they are only interpreted as modifiers
   when N_EQK is set.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};
13908
/* All modifier bits that may accompany N_EQK.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Common groups of permitted types, used as shorthand in the type
   constraints passed to neon_check_type.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13924
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn until one matches the parsed
     operands exactly.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* No additional constraints are checked for register
		 lists here.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
14067
14068 /* True if SHAPE is predominantly a quadword operation (most of the time, this
14069 means the Q bit should be set). */
14070
14071 static int
14072 neon_quad (enum neon_shape shape)
14073 {
14074 return neon_shape_class[shape] == SC_QUAD;
14075 }
14076
14077 static void
14078 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
14079 unsigned *g_size)
14080 {
14081 /* Allow modification to be made to types which are constrained to be
14082 based on the key element, based on bits set alongside N_EQK. */
14083 if ((typebits & N_EQK) != 0)
14084 {
14085 if ((typebits & N_HLF) != 0)
14086 *g_size /= 2;
14087 else if ((typebits & N_DBL) != 0)
14088 *g_size *= 2;
14089 if ((typebits & N_SGN) != 0)
14090 *g_type = NT_signed;
14091 else if ((typebits & N_UNS) != 0)
14092 *g_type = NT_unsigned;
14093 else if ((typebits & N_INT) != 0)
14094 *g_type = NT_integer;
14095 else if ((typebits & N_FLT) != 0)
14096 *g_type = NT_float;
14097 else if ((typebits & N_SIZ) != 0)
14098 *g_type = NT_untyped;
14099 }
14100 }
14101
14102 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
14103 operand type, i.e. the single type specified in a Neon instruction when it
14104 is the only one given. */
14105
14106 static struct neon_type_el
14107 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
14108 {
14109 struct neon_type_el dest = *key;
14110
14111 gas_assert ((thisarg & N_EQK) != 0);
14112
14113 neon_modify_type_size (thisarg, &dest.type, &dest.size);
14114
14115 return dest;
14116 }
14117
14118 /* Convert Neon type and size into compact bitmask representation. */
14119
14120 static enum neon_type_mask
14121 type_chk_of_el_type (enum neon_el_type type, unsigned size)
14122 {
14123 switch (type)
14124 {
14125 case NT_untyped:
14126 switch (size)
14127 {
14128 case 8: return N_8;
14129 case 16: return N_16;
14130 case 32: return N_32;
14131 case 64: return N_64;
14132 default: ;
14133 }
14134 break;
14135
14136 case NT_integer:
14137 switch (size)
14138 {
14139 case 8: return N_I8;
14140 case 16: return N_I16;
14141 case 32: return N_I32;
14142 case 64: return N_I64;
14143 default: ;
14144 }
14145 break;
14146
14147 case NT_float:
14148 switch (size)
14149 {
14150 case 16: return N_F16;
14151 case 32: return N_F32;
14152 case 64: return N_F64;
14153 default: ;
14154 }
14155 break;
14156
14157 case NT_poly:
14158 switch (size)
14159 {
14160 case 8: return N_P8;
14161 case 16: return N_P16;
14162 case 64: return N_P64;
14163 default: ;
14164 }
14165 break;
14166
14167 case NT_signed:
14168 switch (size)
14169 {
14170 case 8: return N_S8;
14171 case 16: return N_S16;
14172 case 32: return N_S32;
14173 case 64: return N_S64;
14174 default: ;
14175 }
14176 break;
14177
14178 case NT_unsigned:
14179 switch (size)
14180 {
14181 case 8: return N_U8;
14182 case 16: return N_U16;
14183 case 32: return N_U32;
14184 case 64: return N_U64;
14185 default: ;
14186 }
14187 break;
14188
14189 default: ;
14190 }
14191
14192 return N_UTYP;
14193 }
14194
14195 /* Convert compact Neon bitmask type representation to a type and size. Only
14196 handles the case where a single bit is set in the mask. */
14197
14198 static int
14199 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
14200 enum neon_type_mask mask)
14201 {
14202 if ((mask & N_EQK) != 0)
14203 return FAIL;
14204
14205 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
14206 *size = 8;
14207 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
14208 *size = 16;
14209 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
14210 *size = 32;
14211 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
14212 *size = 64;
14213 else
14214 return FAIL;
14215
14216 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
14217 *type = NT_signed;
14218 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
14219 *type = NT_unsigned;
14220 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
14221 *type = NT_integer;
14222 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
14223 *type = NT_untyped;
14224 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
14225 *type = NT_poly;
14226 else if ((mask & (N_F_ALL)) != 0)
14227 *type = NT_float;
14228 else
14229 return FAIL;
14230
14231 return SUCCESS;
14232 }
14233
14234 /* Modify a bitmask of allowed types. This is only needed for type
14235 relaxation. */
14236
14237 static unsigned
14238 modify_types_allowed (unsigned allowed, unsigned mods)
14239 {
14240 unsigned size;
14241 enum neon_el_type type;
14242 unsigned destmask;
14243 int i;
14244
14245 destmask = 0;
14246
14247 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
14248 {
14249 if (el_type_of_type_chk (&type, &size,
14250 (enum neon_type_mask) (allowed & i)) == SUCCESS)
14251 {
14252 neon_modify_type_size (mods, &type, &size);
14253 destmask |= type_chk_of_el_type (type, size);
14254 }
14255 }
14256
14257 return destmask;
14258 }
14259
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;		/* -1u == "no key size seen yet".  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A type after the mnemonic and types on the operands are mutually
     exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 records the key operand's type, size and allowed set;
     pass 1 validates every operand against it.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* N_EQK operand: must equal the key type after the
		     modifier bits are applied to it.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
14469
14470 /* Neon-style VFP instruction forwarding. */
14471
14472 /* Thumb VFP instructions have 0xE in the condition field. */
14473
14474 static void
14475 do_vfp_cond_or_thumb (void)
14476 {
14477 inst.is_neon = 1;
14478
14479 if (thumb_mode)
14480 inst.instruction |= 0xe0000000;
14481 else
14482 inst.instruction |= inst.cond << 28;
14483 }
14484
14485 /* Look up and encode a simple mnemonic, for use as a helper function for the
14486 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14487 etc. It is assumed that operand parsing has already been done, and that the
14488 operands are in the form expected by the given opcode (this isn't necessarily
14489 the same as the form in which they were parsed, hence some massaging must
14490 take place before this function is called).
14491 Checks current arch version against that in the looked-up opcode. */
14492
static void
do_vfp_nsyn_opcode (const char *opname)
{
  const struct asm_opcode *opcode;

  /* Look the mnemonic up in the main opcode hash table; it must exist.  */
  opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);

  if (!opcode)
    abort ();

  /* The opcode must be available on the selected CPU/FPU in the current
     (ARM or Thumb) instruction set.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
		thumb_mode ? *opcode->tvariant : *opcode->avariant),
	      _(BAD_FPU));

  inst.is_neon = 1;

  if (thumb_mode)
    {
      inst.instruction = opcode->tvalue;
      opcode->tencode ();
    }
  else
    {
      /* ARM encodings carry the condition in bits 28-31.  */
      inst.instruction = (inst.cond << 28) | opcode->avalue;
      opcode->aencode ();
    }
}
14520
14521 static void
14522 do_vfp_nsyn_add_sub (enum neon_shape rs)
14523 {
14524 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14525
14526 if (rs == NS_FFF || rs == NS_HHH)
14527 {
14528 if (is_add)
14529 do_vfp_nsyn_opcode ("fadds");
14530 else
14531 do_vfp_nsyn_opcode ("fsubs");
14532
14533 /* ARMv8.2 fp16 instruction. */
14534 if (rs == NS_HHH)
14535 do_scalar_fp16_v82_encode ();
14536 }
14537 else
14538 {
14539 if (is_add)
14540 do_vfp_nsyn_opcode ("faddd");
14541 else
14542 do_vfp_nsyn_opcode ("fsubd");
14543 }
14544 }
14545
14546 /* Check operand types to see if this is a VFP instruction, and if so call
14547 PFN (). */
14548
static int
try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
{
  enum neon_shape rs;
  struct neon_type_el et;

  /* Select a VFP-style shape (H/F/D scalar registers) and check the type
     signature for the ARGS-operand form.  */
  switch (args)
    {
    case 2:
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
      break;

    case 3:
      rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
      et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
			    N_F_ALL | N_KEY | N_VFP);
      break;

    default:
      abort ();
    }

  /* A valid type means this really is a VFP instruction: encode it.  */
  if (et.type != NT_invtype)
    {
      pfn (rs);
      return SUCCESS;
    }

  /* Not VFP: clear any error set during the type check so the caller can
     try a Neon interpretation instead.  */
  inst.error = NULL;
  return FAIL;
}
14581
14582 static void
14583 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14584 {
14585 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14586
14587 if (rs == NS_FFF || rs == NS_HHH)
14588 {
14589 if (is_mla)
14590 do_vfp_nsyn_opcode ("fmacs");
14591 else
14592 do_vfp_nsyn_opcode ("fnmacs");
14593
14594 /* ARMv8.2 fp16 instruction. */
14595 if (rs == NS_HHH)
14596 do_scalar_fp16_v82_encode ();
14597 }
14598 else
14599 {
14600 if (is_mla)
14601 do_vfp_nsyn_opcode ("fmacd");
14602 else
14603 do_vfp_nsyn_opcode ("fnmacd");
14604 }
14605 }
14606
14607 static void
14608 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14609 {
14610 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14611
14612 if (rs == NS_FFF || rs == NS_HHH)
14613 {
14614 if (is_fma)
14615 do_vfp_nsyn_opcode ("ffmas");
14616 else
14617 do_vfp_nsyn_opcode ("ffnmas");
14618
14619 /* ARMv8.2 fp16 instruction. */
14620 if (rs == NS_HHH)
14621 do_scalar_fp16_v82_encode ();
14622 }
14623 else
14624 {
14625 if (is_fma)
14626 do_vfp_nsyn_opcode ("ffmad");
14627 else
14628 do_vfp_nsyn_opcode ("ffnmad");
14629 }
14630 }
14631
14632 static void
14633 do_vfp_nsyn_mul (enum neon_shape rs)
14634 {
14635 if (rs == NS_FFF || rs == NS_HHH)
14636 {
14637 do_vfp_nsyn_opcode ("fmuls");
14638
14639 /* ARMv8.2 fp16 instruction. */
14640 if (rs == NS_HHH)
14641 do_scalar_fp16_v82_encode ();
14642 }
14643 else
14644 do_vfp_nsyn_opcode ("fmuld");
14645 }
14646
14647 static void
14648 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14649 {
14650 int is_neg = (inst.instruction & 0x80) != 0;
14651 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
14652
14653 if (rs == NS_FF || rs == NS_HH)
14654 {
14655 if (is_neg)
14656 do_vfp_nsyn_opcode ("fnegs");
14657 else
14658 do_vfp_nsyn_opcode ("fabss");
14659
14660 /* ARMv8.2 fp16 instruction. */
14661 if (rs == NS_HH)
14662 do_scalar_fp16_v82_encode ();
14663 }
14664 else
14665 {
14666 if (is_neg)
14667 do_vfp_nsyn_opcode ("fnegd");
14668 else
14669 do_vfp_nsyn_opcode ("fabsd");
14670 }
14671 }
14672
14673 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14674 insns belong to Neon, and are handled elsewhere. */
14675
14676 static void
14677 do_vfp_nsyn_ldm_stm (int is_dbmode)
14678 {
14679 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14680 if (is_ldm)
14681 {
14682 if (is_dbmode)
14683 do_vfp_nsyn_opcode ("fldmdbs");
14684 else
14685 do_vfp_nsyn_opcode ("fldmias");
14686 }
14687 else
14688 {
14689 if (is_dbmode)
14690 do_vfp_nsyn_opcode ("fstmdbs");
14691 else
14692 do_vfp_nsyn_opcode ("fstmias");
14693 }
14694 }
14695
14696 static void
14697 do_vfp_nsyn_sqrt (void)
14698 {
14699 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14700 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14701
14702 if (rs == NS_FF || rs == NS_HH)
14703 {
14704 do_vfp_nsyn_opcode ("fsqrts");
14705
14706 /* ARMv8.2 fp16 instruction. */
14707 if (rs == NS_HH)
14708 do_scalar_fp16_v82_encode ();
14709 }
14710 else
14711 do_vfp_nsyn_opcode ("fsqrtd");
14712 }
14713
14714 static void
14715 do_vfp_nsyn_div (void)
14716 {
14717 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14718 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14719 N_F_ALL | N_KEY | N_VFP);
14720
14721 if (rs == NS_FFF || rs == NS_HHH)
14722 {
14723 do_vfp_nsyn_opcode ("fdivs");
14724
14725 /* ARMv8.2 fp16 instruction. */
14726 if (rs == NS_HHH)
14727 do_scalar_fp16_v82_encode ();
14728 }
14729 else
14730 do_vfp_nsyn_opcode ("fdivd");
14731 }
14732
/* Encode the VFP negated-multiply family (half, single or double
   precision), then fix up the condition field.  */

static void
do_vfp_nsyn_nmul (void)
{
  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
		   N_F_ALL | N_KEY | N_VFP);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      NEON_ENCODE (SINGLE, inst);
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      NEON_ENCODE (DOUBLE, inst);
      do_vfp_dp_rd_rn_rm ();
    }
  do_vfp_cond_or_thumb ();

}
14757
static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (inst.operands[1].isreg)
    {
      /* Register-register compare: vcmp{e} Rd, Rm.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare against immediate: rewrite vcmp{e} as the vcmp{e}z
	 variant.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
14812
14813 static void
14814 nsyn_insert_sp (void)
14815 {
14816 inst.operands[1] = inst.operands[0];
14817 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
14818 inst.operands[0].reg = REG_SP;
14819 inst.operands[0].isreg = 1;
14820 inst.operands[0].writeback = 1;
14821 inst.operands[0].present = 1;
14822 }
14823
14824 static void
14825 do_vfp_nsyn_push (void)
14826 {
14827 nsyn_insert_sp ();
14828
14829 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14830 _("register list must contain at least 1 and at most 16 "
14831 "registers"));
14832
14833 if (inst.operands[1].issingle)
14834 do_vfp_nsyn_opcode ("fstmdbs");
14835 else
14836 do_vfp_nsyn_opcode ("fstmdbd");
14837 }
14838
14839 static void
14840 do_vfp_nsyn_pop (void)
14841 {
14842 nsyn_insert_sp ();
14843
14844 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14845 _("register list must contain at least 1 and at most 16 "
14846 "registers"));
14847
14848 if (inst.operands[1].issingle)
14849 do_vfp_nsyn_opcode ("fldmias");
14850 else
14851 do_vfp_nsyn_opcode ("fldmiad");
14852 }
14853
14854 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14855 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14856
static void
neon_dp_fixup (struct arm_it* insn)
{
  unsigned int i = insn->instruction;
  insn->is_neon = 1;

  if (thumb_mode)
    {
      /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
      if (i & (1 << 24))
	i |= 1 << 28;

      i &= ~(1 << 24);

      /* Thumb Neon data-processing leading byte.  */
      i |= 0xef000000;
    }
  else
    /* ARM (unconditional) Neon data-processing leading byte.  */
    i |= 0xf2000000;

  insn->instruction = i;
}
14878
14879 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
14880 (0, 1, 2, 3). */
14881
static unsigned
neon_logbits (unsigned x)
{
  /* ffs returns the 1-based index of the lowest set bit, so a power of
     two 8/16/32/64 maps to 4/5/6/7; subtracting 4 gives 0..3.  */
  unsigned lowbit = ffs (x);

  return lowbit - 4;
}
14887
/* Low four bits of a Neon register number (the Rd/Rn/Rm field).  */
#define LOW4(R) ((R) & 0xf)
/* Bit 4 of a Neon register number (the D/N/M extension bit).  */
#define HI1(R) (((R) >> 4) & 1)
14890
14891 /* Encode insns with bit pattern:
14892
14893 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14894 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14895
14896 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14897 different meaning for some instruction. */
14898
static void
neon_three_same (int isquad, int ubit, int size)
{
  /* Destination register, D:Rd.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* First source register, N:Rn.  */
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  /* Second source register, M:Rm.  */
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  /* SIZE == -1 leaves the size field untouched (see header comment).  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
14915
14916 /* Encode instructions of the form:
14917
14918 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14919 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14920
14921 Don't write size if SIZE == -1. */
14922
static void
neon_two_same (int qbit, int ubit, int size)
{
  /* Destination register, D:Rd.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Source register, M:Rm.  */
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  /* SIZE == -1 leaves the size field (bits 18-19) untouched.  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  neon_dp_fixup (&inst);
}
14938
14939 /* Neon instruction encoders, in approximate order of appearance. */
14940
14941 static void
14942 do_neon_dyadic_i_su (void)
14943 {
14944 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14945 struct neon_type_el et = neon_check_type (3, rs,
14946 N_EQK, N_EQK, N_SU_32 | N_KEY);
14947 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14948 }
14949
14950 static void
14951 do_neon_dyadic_i64_su (void)
14952 {
14953 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14954 struct neon_type_el et = neon_check_type (3, rs,
14955 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14956 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14957 }
14958
/* Encode a two-register immediate-shift form.  WRITE_UBIT says whether to
   write UVAL into bit 24; IMMBITS is the already-computed shift-amount
   encoding.  */

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  /* Element size in bytes; split below into the L bit (bit 7) and the
     bits 19-21 size field.  */
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
14977
/* Encode VSHL: immediate-shift form when the third operand is an
   immediate, otherwise the three-register form.  */

static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      int imm = inst.operands[2].imm;

      /* A left-shift amount must be less than the element width.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
15013
/* Encode VQSHL: immediate-shift form when the third operand is an
   immediate, otherwise the three-register form.  */

static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
      int imm = inst.operands[2].imm;

      /* The shift amount must be less than the element width.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
15043
15044 static void
15045 do_neon_rshl (void)
15046 {
15047 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15048 struct neon_type_el et = neon_check_type (3, rs,
15049 N_EQK, N_EQK, N_SU_ALL | N_KEY);
15050 unsigned int tmp;
15051
15052 tmp = inst.operands[2].reg;
15053 inst.operands[2].reg = inst.operands[1].reg;
15054 inst.operands[1].reg = tmp;
15055 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15056 }
15057
/* Choose the Neon "cmode" encoding for a bitwise-logic immediate of
   element width SIZE, storing the encodable 8-bit payload in *IMMBITS.
   Returns the cmode value, or FAIL (after reporting an error) if
   IMMEDIATE cannot be encoded.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* 32-bit element: the payload byte may sit in any of the four byte
	 positions; cmode selects which.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Otherwise try a 16-bit encoding if both halves repeat.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit element: the payload byte may sit in either byte position.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
15113
/* Encode the Neon bitwise-logic instructions, either in three-register
   form or as an immediate form (VAND/VORN become VBIC/VORR of the
   inverted immediate).  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Two-operand (Rd, #imm) and three-operand (Rd, Rn, #imm) forms
	 place the immediate in different operand slots.  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
15199
15200 static void
15201 do_neon_bitfield (void)
15202 {
15203 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15204 neon_check_type (3, rs, N_IGNORE_TYPE);
15205 neon_three_same (neon_quad (rs), 0, -1);
15206 }
15207
/* Encode a three-register dyadic operation that may be integer or float.
   UBIT_MEANING is the element type which causes the U bit to be set;
   TYPES is the mask of permitted key types; DESTBITS adds extra type bits
   for the destination operand.  */

static void
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
		  unsigned destbits)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
					    types | N_KEY);
  if (et.type == NT_float)
    {
      NEON_ENCODE (FLOAT, inst);
      /* Only the fp16 variant writes the size field here.  */
      neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
    }
  else
    {
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
    }
}
15226
/* Dyadic op taking signed/unsigned integer or float types (N_SUF_32 mask);
   the U bit is set for unsigned element types.  */

static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
15232
/* As do_neon_dyadic_if_su, for D-register-only instructions.  */

static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
15240
/* Dyadic op taking integer-or-float types (N_IF_32 mask), never setting
   the U bit.  */

static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
15248
/* Checks requested of vfp_or_neon_is_neon, combinable as a bitmask.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Reject conditional execution (ARM mode).  */
  NEON_CHECK_ARCH = 2,	/* Require the base Neon extension.  */
  NEON_CHECK_ARCH8 = 4	/* Require the ARMv8 Neon extension.  */
};
15255
15256 /* Call this function if an instruction which may have belonged to the VFP or
15257 Neon instruction sets, but turned out to be a Neon instruction (due to the
15258 operand types involved, etc.). We have to check and/or fix-up a couple of
15259 things:
15260
15261 - Make sure the user hasn't attempted to make a Neon instruction
15262 conditional.
15263 - Alter the value in the condition code field if necessary.
15264 - Make sure that the arch supports Neon instructions.
15265
15266 Which of these operations take place depends on bits from enum
15267 vfp_or_neon_is_neon_bits.
15268
15269 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
15270 current instruction's condition is COND_ALWAYS, the condition field is
15271 changed to inst.uncond_value. This is necessary because instructions shared
15272 between VFP and Neon may be conditional for the VFP variants only, and the
15273 unconditional Neon version must have, e.g., 0xF in the condition field. */
15274
static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      /* Substitute the instruction's unconditional encoding when one is
	 defined (see the warning in the function comment above).  */
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }

  if ((check & NEON_CHECK_ARCH)
      && !mark_feature_used (&fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  if ((check & NEON_CHECK_ARCH8)
      && !mark_feature_used (&fpu_neon_ext_armv8))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
15306
/* Encode VADD/VSUB: first try the VFP interpretation, falling back to the
   Neon integer/float forms.  */

static void
do_neon_addsub_if_i (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
15320
15321 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
15322 result to be:
15323 V<op> A,B (A is operand 0, B is operand 2)
15324 to mean:
15325 V<op> A,B,A
15326 not:
15327 V<op> A,B,B
15328 so handle that case specially. */
15329
15330 static void
15331 neon_exchange_operands (void)
15332 {
15333 if (inst.operands[1].present)
15334 {
15335 void *scratch = xmalloc (sizeof (inst.operands[0]));
15336
15337 /* Swap operands[1] and operands[2]. */
15338 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
15339 inst.operands[1] = inst.operands[2];
15340 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
15341 free (scratch);
15342 }
15343 else
15344 {
15345 inst.operands[1] = inst.operands[2];
15346 inst.operands[2] = inst.operands[0];
15347 }
15348 }
15349
/* Encode a Neon compare.  REGTYPES is the type mask for the
   register-register form and IMMTYPES for the immediate form; INVERT
   swaps the source operands first (expressing e.g. LT via GT).  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Two-register immediate form.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
15377
/* Non-inverted compare: operands are encoded in source order.  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
}
15383
/* Inverted compare: the source operands are swapped before encoding.  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
}
15389
/* VCEQ: the same integer-or-float type mask applies to both the register
   and the immediate forms; no operand swap.  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
15395
15396 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
15397 scalars, which are encoded in 5 bits, M : Rm.
15398 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
15399 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
15400 index in M.
15401
15402 Dot Product instructions are similar to multiply instructions except elsize
15403 should always be 32.
15404
15405 This function translates SCALAR, which is GAS's internal encoding of indexed
15406 scalar register, to raw encoding. There is also register and index range
15407 check based on ELSIZE. */
15408
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      /* 16-bit scalar: register in Rm[2:0], index in M:Rm[3].  */
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      /* 32-bit scalar: register in Rm[3:0], index in M.  */
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
15434
15435 /* Encode multiply / multiply-accumulate scalar instructions. */
15436
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Translate GAS's scalar operand to the raw M:Rm encoding.  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  /* Bit 8 distinguishes the float variant; UBIT is written to bit 24.  */
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
15459
/* Encode VMLA/VMLS, which may be a VFP instruction, a Neon scalar
   (indexed-element) form, or a plain Neon three-register form.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen. Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
15484
/* Encode VFMA/VFMS: try the VFP form first, then fall back to Neon.  */

static void
do_neon_fmac (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
15496
15497 static void
15498 do_neon_tst (void)
15499 {
15500 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15501 struct neon_type_el et = neon_check_type (3, rs,
15502 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
15503 neon_three_same (neon_quad (rs), 0, et.size);
15504 }
15505
15506 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15507 same types as the MAC equivalents. The polynomial type for this instruction
15508 is encoded the same as the integer type. */
15509
static void
do_neon_mul (void)
{
  /* VFP interpretation first.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar ();
  else
    /* The polynomial (P8) type is encoded like the integer types (see the
       comment above this function).  */
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
}
15524
/* Encode VQDMULH/VQRDMULH: scalar (indexed-element) or three-register
   form, signed 16/32-bit elements only.  */

static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15546
/* Encode VQRDMLAH/VQRDMLSH, which require the ARMv8.1 AdvSIMD extension;
   otherwise the same scalar/three-register split as do_neon_qdmulh.  */

static void
do_neon_qrdmlah (void)
{
  /* Check we're on the correct architecture.  */
  if (!mark_feature_used (&fpu_neon_ext_armv8))
    inst.error =
      _("instruction form not available on this architecture.");
  else if (!mark_feature_used (&fpu_neon_ext_v8_1))
    {
      as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
      record_feature_use (&fpu_neon_ext_v8_1);
    }

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15578
15579 static void
15580 do_neon_fcmp_absolute (void)
15581 {
15582 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15583 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15584 N_F_16_32 | N_KEY);
15585 /* Size field comes from bit mask. */
15586 neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
15587 }
15588
/* Inverted-operand form of the absolute compare: swap the operands and
   encode as the non-inverted variant.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15595
15596 static void
15597 do_neon_step (void)
15598 {
15599 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15600 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15601 N_F_16_32 | N_KEY);
15602 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
15603 }
15604
/* VABS/VNEG: try the VFP scalar encoding first, otherwise emit the Neon
   two-register form.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  /* Dd in bits 12-15 (+D in 22), Dm in bits 0-3 (+M in 5), Q in bit 6,
     float flag in bit 10, log2 (element size) in bits 18-19.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
15630
15631 static void
15632 do_neon_sli (void)
15633 {
15634 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15635 struct neon_type_el et = neon_check_type (2, rs,
15636 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15637 int imm = inst.operands[2].imm;
15638 constraint (imm < 0 || (unsigned)imm >= et.size,
15639 _("immediate out of range for insert"));
15640 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15641 }
15642
15643 static void
15644 do_neon_sri (void)
15645 {
15646 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15647 struct neon_type_el et = neon_check_type (2, rs,
15648 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15649 int imm = inst.operands[2].imm;
15650 constraint (imm < 1 || (unsigned)imm > et.size,
15651 _("immediate out of range for insert"));
15652 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
15653 }
15654
/* VQSHLU (immediate): signed input types, unsigned saturated result.  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* Shift count must fit within the element: 0 <= imm < size.  */
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
15671
15672 static void
15673 do_neon_qmovn (void)
15674 {
15675 struct neon_type_el et = neon_check_type (2, NS_DQ,
15676 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15677 /* Saturating move where operands can be signed or unsigned, and the
15678 destination has the same signedness. */
15679 NEON_ENCODE (INTEGER, inst);
15680 if (et.type == NT_unsigned)
15681 inst.instruction |= 0xc0;
15682 else
15683 inst.instruction |= 0x80;
15684 neon_two_same (0, 1, et.size / 2);
15685 }
15686
15687 static void
15688 do_neon_qmovun (void)
15689 {
15690 struct neon_type_el et = neon_check_type (2, NS_DQ,
15691 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
15692 /* Saturating move with unsigned results. Operands must be signed. */
15693 NEON_ENCODE (INTEGER, inst);
15694 neon_two_same (0, 1, et.size / 2);
15695 }
15696
/* VQ{R}SHRN (immediate): saturating shift right and narrow.  A zero shift
   count is redirected to the VQMOVN encoding.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  /* Valid shift counts are 1..(narrowed) element size; the encoded value
     is size - imm.  */
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
15723
/* VQ{R}SHRUN (immediate): saturating shift right and narrow with unsigned
   result from signed operands.  A zero shift count is redirected to the
   VQMOVUN encoding.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
15753
15754 static void
15755 do_neon_movn (void)
15756 {
15757 struct neon_type_el et = neon_check_type (2, NS_DQ,
15758 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15759 NEON_ENCODE (INTEGER, inst);
15760 neon_two_same (0, 1, et.size / 2);
15761 }
15762
/* V{R}SHRN (immediate): shift right and narrow.  A zero shift count is
   redirected to the VMOVN encoding.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm> */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  /* Shift counts run 1..(narrowed) element size; encoded as size - imm.  */
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
15787
/* VSHLL (immediate): shift left long.  The shift-by-element-size form has
   its own encoding; smaller shifts use the generic immediate-shift
   encoding with a narrower set of types.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
15817
15818 /* Check the various types for the VCVT instruction, and return which version
15819 the current instruction is. */
15820
/* X-macro listing every conversion "flavour".  Each CVT_VAR entry gives:
   a flavour name, destination type mask, source type mask, an extra
   register-class mask (whole_reg/key are locals at the expansion site in
   get_neon_cvt_flavour), and up to three VFP mnemonic strings — the
   bitshifted, plain, and round-to-zero forms, or NULL where no such VFP
   form exists.  The list is expanded repeatedly below with different
   CVT_VAR definitions.  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */ \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

/* Expand the list into enumerators named neon_cvt_flavour_<name>.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* Flavours from here up in the list are pure-VFP conversions.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15869
/* Determine which conversion flavour matches the current instruction's
   operand types by probing each CVT_VAR entry in turn with
   neon_check_type; the first type check that succeeds wins (clearing any
   error left by earlier failed probes).  Returns
   neon_cvt_flavour_invalid if nothing matches.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
  if (et.type != NT_invtype) \
    { \
      inst.error = NULL; \
      return (neon_cvt_flavour_##C); \
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
15895
/* Conversion behaviour selector for the VCVT family.  Modes a/n/p/m map
   directly to the FPv8 rounding-mode field (rm = 0..3, see
   do_vfp_nsyn_cvt_fpv8); z selects the round-towards-zero forms and x the
   VCVTR-style forms (see do_neon_cvt / do_neon_cvtr below).  Mode r is
   not used in the code visible here — presumably a further VFP variant;
   confirm against the opcode table.  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,	/* rm = 0.  */
  neon_cvt_mode_n,	/* rm = 1.  */
  neon_cvt_mode_p,	/* rm = 2.  */
  neon_cvt_mode_m,	/* rm = 3.  */
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
15906
15907 /* Neon-syntax VFP conversions. */
15908
/* Assemble a VFP conversion written in Neon syntax by dispatching to the
   traditional VFP mnemonic (e.g. "fsitos") chosen from the flavour
   table: the bitshifted (BSN) names for immediate shapes, the plain (CN)
   names otherwise.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The fixed-point forms take the destination twice; fold operand
	     2 down into slot 1 for the VFP encoder.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
15960
/* Assemble the round-towards-zero VFP form of VCVT by looking up the "Z"
   mnemonic (e.g. "ftosizs") for the detected flavour.  Flavours with no
   round-to-zero VFP form have a NULL entry and are silently skipped.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
15977
/* Encode an FP v8 VCVT{A,N,P,M} float-to-integer conversion.  FLAVOUR
   selects signedness (op bit) and source precision (sz bit); MODE must be
   one of a/n/p/m and supplies the rm field.  */

static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
		_(BAD_FP16));

  set_it_insn_type (OUTSIDE_IT_INSN);

  /* sz = 1 for a double-precision source; op = 1 for a signed result.
     The default case returns, so both are always set before use.  */
  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f16:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f16:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16)
    do_scalar_fp16_v82_encode ();
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  /* Unconditional encoding.  */
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
16053
/* Common worker for the whole VCVT family.  Selects between the VFP and
   Advanced SIMD encodings based on the operand shape and conversion
   flavour, then encodes the chosen form.  MODE selects the conversion
   behaviour (see enum neon_cvt_mode).  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	/* Fixed-point conversion: immediate shift count operand.  */
	unsigned immbits;
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* NOTE(review): bit 21 was already set just above, so this is
	       redundant (but harmless).  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    /* Half-precision fixed-point forms.  */
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* ARMv8 VCVT{A,N,P,M} Advanced SIMD encoding.  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
	  /* Plain integer <-> float conversion (also reached from the
	     fixed-point case above when the immediate is #0).  */
    int_encode:
	  {
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
16247
/* VCVTR: assembled via conversion mode x.  */

static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
16253
/* Plain VCVT: assembled via mode z (round towards zero, cf. PR11109
   handling in do_neon_cvt_1).  */

static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
16259
/* VCVTA: mode a (rm = 0 in the FPv8 encoding).  */

static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
16265
/* VCVTN: mode n (rm = 1 in the FPv8 encoding).  */

static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
16271
/* VCVTP: mode p (rm = 2 in the FPv8 encoding).  */

static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
16277
/* VCVTM: mode m (rm = 3 in the FPv8 encoding).  */

static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
16283
/* Shared encoder for VCVTB/VCVTT.  T selects the top-half (T) form; TO is
   TRUE when converting towards half precision (destination is F16);
   IS_DOUBLE when the wide operand is double precision.  */

static void
do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
{
  if (is_double)
    mark_feature_used (&fpu_vfp_ext_armv8);

  /* The operand on the non-F16 side is a D register in the double case.  */
  encode_arm_vfp_reg (inst.operands[0].reg,
		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg,
		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= to ? 0x10000 : 0;	/* Direction bit.  */
  inst.instruction |= t ? 0x80 : 0;	/* T (top half) bit.  */
  inst.instruction |= is_double ? 0x100 : 0;	/* sz bit.  */
  do_vfp_cond_or_thumb ();
}
16299
/* Common worker for VCVTB (t = FALSE) and VCVTT (t = TRUE): probe the
   operand types to determine direction and precision, then hand off to
   do_neon_cvttb_2.  Each failed probe leaves inst.error set, so it is
   cleared again on a successful match.  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* F32 -> F16.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* F16 -> F32.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* F64 -> F16.  The VCVTB and VCVTT instructions with D-register
	 operands don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* F16 -> F64.  The VCVTB and VCVTT instructions with D-register
	 operands don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    return;
}
16341
/* VCVTB: bottom-half form (t = FALSE).  */

static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
16347
16348
/* VCVTT: top-half form (t = TRUE).  */

static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
16354
/* Encode the immediate form of VMOV/VMVN: find a cmode/op combination
   that can represent the requested constant, trying the bit-inverted
   constant with the opposite instruction (VMOV <-> VMVN) if the direct
   encoding fails.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  /* 64-bit immediates arrive split across imm (low) and reg (high).  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV.  Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the op bit with the possibly-flipped value.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
16406
16407 static void
16408 do_neon_mvn (void)
16409 {
16410 if (inst.operands[1].isreg)
16411 {
16412 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16413
16414 NEON_ENCODE (INTEGER, inst);
16415 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16416 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16417 inst.instruction |= LOW4 (inst.operands[1].reg);
16418 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16419 inst.instruction |= neon_quad (rs) << 6;
16420 }
16421 else
16422 {
16423 NEON_ENCODE (IMMED, inst);
16424 neon_move_immediate ();
16425 }
16426
16427 neon_dp_fixup (&inst);
16428 }
16429
16430 /* Encode instructions of form:
16431
16432 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16433 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16434
static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  /* Qd in bits 12-15 (+D in 22), Dn in bits 16-19 (+N in 7), Dm in bits
     0-3 (+M in 5); U bit (24) set for unsigned types; log2 (size) in bits
     20-21 — see the field diagram above.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
16449
/* Long dyadic operations: Qd, Dn, Dm with a doubled result width.  */

static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16458
/* VABAL: like the generic long dyadic case but restricted to integer
   types.  */

static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16466
/* Long multiply(-accumulate), register or by-scalar form.  NOTE(review):
   the parameter names look swapped relative to their use — REGTYPES is
   consulted for the scalar (NS_QDS) form and SCALARTYPES for the
   three-register (NS_QDD) form.  Behaviour matches the caller's argument
   order, so only the naming is suspect; confirm before renaming.  */

static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
16485
/* Long multiply-accumulate with an optional by-scalar third operand:
   16/32-bit types for the scalar form, any SU_32 type otherwise.  */

static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
16491
16492 /* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
16493 internal SCALAR. QUAD_P is 1 if it's for Q format, otherwise it's 0. */
16494
static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned reg = NEON_SCALAR_REG (scalar);
  unsigned el = NEON_SCALAR_INDEX (scalar);

  if (!quad_p)
    {
      /* D form: register 0-15, element 0-1.  */
      if (reg <= 15 && el <= 1)
	return (((reg & 0x1) << 5)
		| ((reg >> 1) & 0x7)
		| ((el & 0x1) << 3));
    }
  else if (reg <= 7 && el <= 3)
    /* Q form: register 0-7, element 0-3.  */
    return ((reg & 0x7)
	    | ((el & 0x1) << 3)
	    | (((el >> 1) & 0x1) << 5));

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
16524
/* Encode VFMAL/VFMSL (half-precision multiply-accumulate long), in either
   three-register or by-scalar form.  SUBTYPE is non-zero for the variant
   whose add/subtract distinction lives in the size field / bit 23 (see
   the comments below); the callers pass 0 for VFMAL.  */

static void
do_neon_fmac_maybe_scalar_long (int subtype)
{
  enum neon_shape rs;
  int high8;
  /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding.  'size"
     field (bits[21:20]) has different meaning.  For scalar index variant, it's
     used to differentiate add and subtract, otherwise it's with fixed value
     0x2.  */
  int size = -1;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
	       "behaviour is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
	      _(BAD_FP16));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
     be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      high8 = 0xfe000000;
      if (subtype)
	size = 16;
      rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      size = 32;
      if (subtype)
	inst.instruction |= (0x1 << 23);
      rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
    }

  neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16);

  /* "opcode" from template has included "ubit", so simply pass 0 here.  Also,
     the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
     so we simply pass -1 as size.  */
  unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
  neon_three_same (quad_p, 0, size);

  /* Undo neon_dp_fixup.  Redo the high eight bits.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;

#define LOW1(R) ((R) & 0x1)
#define HI4(R) (((R) >> 1) & 0xf)
  /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
     whether the instruction is in Q form and whether Vm is a scalar indexed
     operand.  */
  if (inst.operands[2].isscalar)
    {
      unsigned rm
	= neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
      inst.instruction &= 0xffffffd0;
      inst.instruction |= rm;

      if (!quad_p)
	{
	  /* Redo Rn as well.  */
	  inst.instruction &= 0xfff0ff7f;
	  inst.instruction |= HI4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
	}
    }
  else if (!quad_p)
    {
      /* Redo Rn and Rm.  */
      inst.instruction &= 0xfff0ff50;
      inst.instruction |= HI4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI4 (inst.operands[2].reg);
      inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
    }
}
16606
/* VFMAL: half-precision fused multiply-accumulate long, add form.  */
static void
do_neon_vfmal (void)
{
  do_neon_fmac_maybe_scalar_long (0);
}
16612
/* VFMSL: half-precision fused multiply-accumulate long, subtract form.  */
static void
do_neon_vfmsl (void)
{
  do_neon_fmac_maybe_scalar_long (1);
}
16618
/* Encode a wide dyadic operation (Q = Q op D): the second source is
   double-length relative to the third.  */
static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16626
/* Encode a narrowing dyadic operation (D = Q op Q): the destination holds
   half-width elements.  */
static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  /* Size encoded is that of the narrowed (destination) elements.  */
  neon_mixed_length (et, et.size / 2);
}
16637
/* Encode saturating long multiply (VQDMULL-style) with optional scalar
   third operand; only signed 16/32-bit element types are valid.  */
static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
16643
/* Encode VMULL.  Dispatches to the scalar long-multiply path when the
   third operand is a scalar; otherwise handles the register form,
   including the polynomial (P8/P64) variants.  */
static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  P64 requires the
	     crypto extension.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Map size 64 to the 0b10 size encoding (same field value as
	     size 32).  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
16675
16676 static void
16677 do_neon_ext (void)
16678 {
16679 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
16680 struct neon_type_el et = neon_check_type (3, rs,
16681 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
16682 unsigned imm = (inst.operands[3].imm * et.size) / 8;
16683
16684 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
16685 _("shift out of range"));
16686 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16687 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16688 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16689 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16690 inst.instruction |= LOW4 (inst.operands[2].reg);
16691 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16692 inst.instruction |= neon_quad (rs) << 6;
16693 inst.instruction |= imm << 8;
16694
16695 neon_dp_fixup (&inst);
16696 }
16697
16698 static void
16699 do_neon_rev (void)
16700 {
16701 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16702 struct neon_type_el et = neon_check_type (2, rs,
16703 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16704 unsigned op = (inst.instruction >> 7) & 3;
16705 /* N (width of reversed regions) is encoded as part of the bitmask. We
16706 extract it here to check the elements to be reversed are smaller.
16707 Otherwise we'd get a reserved instruction. */
16708 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
16709 gas_assert (elsize != 0);
16710 constraint (et.size >= elsize,
16711 _("elements must be smaller than reversal region"));
16712 neon_two_same (neon_quad (rs), 1, et.size);
16713 }
16714
/* Encode VDUP: either a scalar (Dm[x]) broadcast to a whole vector, or an
   ARM core register broadcast to all lanes.  */
static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar-to-vector form.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Scalar index, pre-shifted into position alongside the size bits.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
16765
16766 /* VMOV has particularly many variations. It can be one of:
16767 0. VMOV<c><q> <Qd>, <Qm>
16768 1. VMOV<c><q> <Dd>, <Dm>
16769 (Register operations, which are VORR with Rm = Rn.)
16770 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16771 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16772 (Immediate loads.)
16773 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16774 (ARM register to scalar.)
16775 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16776 (Two ARM registers to vector.)
16777 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16778 (Scalar to ARM register.)
16779 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16780 (Vector to two ARM registers.)
16781 8. VMOV.F32 <Sd>, <Sm>
16782 9. VMOV.F64 <Dd>, <Dm>
16783 (VFP register moves.)
16784 10. VMOV.F32 <Sd>, #imm
16785 11. VMOV.F64 <Dd>, #imm
16786 (VFP float immediate load.)
16787 12. VMOV <Rd>, <Sm>
16788 (VFP single to ARM reg.)
16789 13. VMOV <Sd>, <Rm>
16790 (ARM reg to VFP single.)
16791 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16792 (Two ARM regs to two VFP singles.)
16793 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16794 (Two VFP singles to two ARM regs.)
16795
16796 These cases can be disambiguated using neon_select_shape, except cases 1/9
16797 and 3/11 which depend on the operand type too.
16798
16799 All the encoded bits are hardcoded by this function.
16800
16801 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16802 Cases 5, 7 may be used with VFPv2 and above.
16803
16804 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16805 can specify a type where it doesn't make sense to, and is ignored). */
16806
/* Encode VMOV in all its forms; the case numbers below refer to the
   variant list in the comment preceding this function.  */
static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
					  NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
					  NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
					  NS_HR, NS_RH, NS_HI, NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* .F64 goes via the VFP copy instruction.  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	/* VORR with Rm == Rn: the source register goes in both fields.  */
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	/* Sub-32-bit transfers need full Neon, not just VFP.  */
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Sub-32-bit extracts encode signedness too (U bit).  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
      encode_fconstd:
      if (!inst.operands[1].immisfloat)
	{
	  unsigned new_imm;
	  /* Immediate has to fit in 8 bits so float is enough.  */
	  float imm = (float) inst.operands[1].imm;
	  memcpy (&new_imm, &imm, sizeof (float));
	  /* But the assembly may have been written to provide an integer
	     bit pattern that equates to a float, so check that the
	     conversion has worked.  */
	  if (is_quarter_float (new_imm))
	    {
	      if (is_quarter_float (inst.operands[1].imm))
		as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));

	      inst.operands[1].imm = new_imm;
	      inst.operands[1].immisfloat = 1;
	    }
	}

      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
17074
17075 static void
17076 do_neon_rshift_round_imm (void)
17077 {
17078 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
17079 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
17080 int imm = inst.operands[2].imm;
17081
17082 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
17083 if (imm == 0)
17084 {
17085 inst.operands[2].present = 0;
17086 do_neon_mov ();
17087 return;
17088 }
17089
17090 constraint (imm < 1 || (unsigned)imm > et.size,
17091 _("immediate out of range for shift"));
17092 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
17093 et.size - imm);
17094 }
17095
17096 static void
17097 do_neon_movhf (void)
17098 {
17099 enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
17100 constraint (rs != NS_HH, _("invalid suffix"));
17101
17102 if (inst.cond != COND_ALWAYS)
17103 {
17104 if (thumb_mode)
17105 {
17106 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
17107 " the behaviour is UNPREDICTABLE"));
17108 }
17109 else
17110 {
17111 inst.error = BAD_COND;
17112 return;
17113 }
17114 }
17115
17116 do_vfp_sp_monadic ();
17117
17118 inst.is_neon = 1;
17119 inst.instruction |= 0xf0000000;
17120 }
17121
/* Encode VMOVL: lengthening move (Q = widened D).  */
static void
do_neon_movl (void)
{
  struct neon_type_el et = neon_check_type (2, NS_QD,
    N_EQK | N_DBL, N_SU_32 | N_KEY);
  /* Source element size in bytes goes into bits [21:19].  */
  unsigned sizebits = et.size >> 3;
  inst.instruction |= sizebits << 19;
  neon_two_same (0, et.type == NT_unsigned, -1);
}
17131
/* Encode VTRN: transpose elements between two vectors.  */
static void
do_neon_trn (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (neon_quad (rs), 1, et.size);
}
17141
17142 static void
17143 do_neon_zip_uzp (void)
17144 {
17145 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17146 struct neon_type_el et = neon_check_type (2, rs,
17147 N_EQK, N_8 | N_16 | N_32 | N_KEY);
17148 if (rs == NS_DD && et.size == 32)
17149 {
17150 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
17151 inst.instruction = N_MNEM_vtrn;
17152 do_neon_trn ();
17153 return;
17154 }
17155 neon_two_same (neon_quad (rs), 1, et.size);
17156 }
17157
/* Encode VQABS/VQNEG: saturating absolute value / negate; signed types
   only.  */
static void
do_neon_sat_abs_neg (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
17166
/* Encode pairwise long operations (VPADDL/VPADAL-style).  */
static void
do_neon_pair_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instruction.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (neon_quad (rs), 1, et.size);
}
17176
/* Encode reciprocal / reciprocal-square-root estimate (VRECPE/VRSQRTE):
   float and U32 variants share an opcode, distinguished by bit 8.  */
static void
do_neon_recip_est (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
  /* Bit 8 selects the floating-point form.  */
  inst.instruction |= (et.type == NT_float) << 8;
  neon_two_same (neon_quad (rs), 1, et.size);
}
17186
/* Encode VCLS: count leading sign bits; signed element types only.  */
static void
do_neon_cls (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
17195
/* Encode VCLZ: count leading zeros; signedness-agnostic integer types.  */
static void
do_neon_clz (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
17204
/* Encode VCNT: per-byte population count; only 8-bit elements exist.  */
static void
do_neon_cnt (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_INT, N_8 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
17213
/* Encode VSWP: swap two vectors; untyped, so no element size is
   encoded (-1).  */
static void
do_neon_swp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  neon_two_same (neon_quad (rs), 1, -1);
}
17220
17221 static void
17222 do_neon_tbl_tbx (void)
17223 {
17224 unsigned listlenbits;
17225 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
17226
17227 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
17228 {
17229 first_error (_("bad list length for table lookup"));
17230 return;
17231 }
17232
17233 listlenbits = inst.operands[1].imm - 1;
17234 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17235 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17236 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
17237 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
17238 inst.instruction |= LOW4 (inst.operands[2].reg);
17239 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
17240 inst.instruction |= listlenbits << 8;
17241
17242 neon_dp_fixup (&inst);
17243 }
17244
/* Encode VLDM/VSTM (and their DB variants) for D-register lists; the
   single-precision forms are delegated to the VFP encoder.  */
static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register occupies two words in the transfer list.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
17274
17275 static void
17276 do_neon_ldr_str (void)
17277 {
17278 int is_ldr = (inst.instruction & (1 << 20)) != 0;
17279
17280 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
17281 And is UNPREDICTABLE in thumb mode. */
17282 if (!is_ldr
17283 && inst.operands[1].reg == REG_PC
17284 && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
17285 {
17286 if (thumb_mode)
17287 inst.error = _("Use of PC here is UNPREDICTABLE");
17288 else if (warn_on_deprecated)
17289 as_tsktsk (_("Use of PC here is deprecated"));
17290 }
17291
17292 if (inst.operands[0].issingle)
17293 {
17294 if (is_ldr)
17295 do_vfp_nsyn_opcode ("flds");
17296 else
17297 do_vfp_nsyn_opcode ("fsts");
17298
17299 /* ARMv8.2 vldr.16/vstr.16 instruction. */
17300 if (inst.vectype.el[0].size == 16)
17301 do_scalar_fp16_v82_encode ();
17302 }
17303 else
17304 {
17305 if (is_ldr)
17306 do_vfp_nsyn_opcode ("fldd");
17307 else
17308 do_vfp_nsyn_opcode ("fstd");
17309 }
17310 }
17311
17312 /* "interleave" version also handles non-interleaving register VLD1/VST1
17313 instructions. */
17314
/* Encode the interleaving VLD<n>/VST<n> forms (also handles plain
   register-list VLD1/VST1).  Validates alignment and looks up the "type"
   field from a table keyed on the list layout and <n>.  */
static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
       -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2.  */
       -1, -1, -1, -1, 0x4, 0x5, -1, -1,  /* VLD3 / VST3.  */
       -1, -1, -1, -1, -1, -1, 0x0, 0x1   /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* The address operand may carry an @<align> specifier (in bits [>=8]
     of its immediate).  Validate it against the list length.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
17380
17381 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
17382 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
17383 otherwise. The variable arguments are a list of pairs of legal (size, align)
17384 values, terminated with -1. */
17385
17386 static int
17387 neon_alignment_bit (int size, int align, int *do_alignment, ...)
17388 {
17389 va_list ap;
17390 int result = FAIL, thissize, thisalign;
17391
17392 if (!inst.operands[1].immisalign)
17393 {
17394 *do_alignment = 0;
17395 return SUCCESS;
17396 }
17397
17398 va_start (ap, do_alignment);
17399
17400 do
17401 {
17402 thissize = va_arg (ap, int);
17403 if (thissize == -1)
17404 break;
17405 thisalign = va_arg (ap, int);
17406
17407 if (size == thissize && align == thisalign)
17408 result = SUCCESS;
17409 }
17410 while (result != SUCCESS);
17411
17412 va_end (ap);
17413
17414 if (result == SUCCESS)
17415 *do_alignment = 1;
17416 else
17417 first_error (_("unsupported alignment for instruction"));
17418
17419 return result;
17420 }
17421
/* Encode single-lane VLD<n>/VST<n> forms: load/store one element per
   list register, at a given lane index.  */
static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  /* Alignment lives in the upper bits of the address immediate.  */
  int align = inst.operands[1].imm >> 8;
  /* <n> - 1 is held in bits [9:8] of the template bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
				       16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
17506
/* Encode single n-element structure to all lanes VLD<n> instructions.  */

static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  /* <n> - 1 comes from bits [9:8] of the template bitmask.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment use a special size
	   encoding.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* Alignment flag (bit 4) is shared by all the forms above.  */
  inst.instruction |= do_alignment << 4;
}
17581
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
17584
17585 static void
17586 do_neon_ldx_stx (void)
17587 {
17588 if (inst.operands[1].isreg)
17589 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
17590
17591 switch (NEON_LANE (inst.operands[0].imm))
17592 {
17593 case NEON_INTERLEAVE_LANES:
17594 NEON_ENCODE (INTERLV, inst);
17595 do_neon_ld_st_interleave ();
17596 break;
17597
17598 case NEON_ALL_LANES:
17599 NEON_ENCODE (DUP, inst);
17600 if (inst.instruction == N_INV)
17601 {
17602 first_error ("only loads support such operands");
17603 break;
17604 }
17605 do_neon_ld_dup ();
17606 break;
17607
17608 default:
17609 NEON_ENCODE (LANE, inst);
17610 do_neon_ld_st_lane ();
17611 }
17612
17613 /* L bit comes from bit mask. */
17614 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17615 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17616 inst.instruction |= inst.operands[1].reg << 16;
17617
17618 if (inst.operands[1].postind)
17619 {
17620 int postreg = inst.operands[1].imm & 0xf;
17621 constraint (!inst.operands[1].immisreg,
17622 _("post-index must be a register"));
17623 constraint (postreg == 0xd || postreg == 0xf,
17624 _("bad register for post-index"));
17625 inst.instruction |= postreg;
17626 }
17627 else
17628 {
17629 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
17630 constraint (inst.relocs[0].exp.X_op != O_constant
17631 || inst.relocs[0].exp.X_add_number != 0,
17632 BAD_ADDR_MODE);
17633
17634 if (inst.operands[1].writeback)
17635 {
17636 inst.instruction |= 0xd;
17637 }
17638 else
17639 inst.instruction |= 0xf;
17640 }
17641
17642 if (thumb_mode)
17643 inst.instruction |= 0xf9000000;
17644 else
17645 inst.instruction |= 0xf4000000;
17646 }
17647
17648 /* FP v8. */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Shared encoder for three-operand FP v8 VFP instructions; RS selects
     single (FFF), half (HHH) or double (DDD) precision register shapes.  */

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    do_vfp_dp_rd_rn_rm ();

  /* Double-precision forms set bit 8.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  /* FP v8 instructions carry 0xF in the top (condition) nibble.  */
  inst.instruction |= 0xf0000000;
}
17676
17677 static void
17678 do_vsel (void)
17679 {
17680 set_it_insn_type (OUTSIDE_IT_INSN);
17681
17682 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
17683 first_error (_("invalid instruction shape"));
17684 }
17685
17686 static void
17687 do_vmaxnm (void)
17688 {
17689 set_it_insn_type (OUTSIDE_IT_INSN);
17690
17691 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
17692 return;
17693
17694 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
17695 return;
17696
17697 neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
17698 }
17699
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  /* Common encoder for all VRINT<mode> variants.  Tries the VFP
     scalar encoding first; if type checking fails, retries with the
     Advanced SIMD (vector) encoding.  */
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The a/n/p/m modes have no conditional form, so forbid IT.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      /* Per-mode opcode bits for the VFP encoding.  */
      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Bit 8 distinguishes double precision.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      /* Vector forms are never conditional.  */
      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* Per-mode opcode bits (bits [9:7]) for the Neon encoding;
	 mode 'r' (round per FPSCR) has no vector form.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
17791
static void
do_vrintx (void)
{
  /* VRINTX: round to integral, mode 'x' (exact; signals inexact).  */
  do_vrint_1 (neon_cvt_mode_x);
}
17797
static void
do_vrintz (void)
{
  /* VRINTZ: round to integral, towards zero.  */
  do_vrint_1 (neon_cvt_mode_z);
}
17803
static void
do_vrintr (void)
{
  /* VRINTR: round to integral using the FPSCR rounding mode.  */
  do_vrint_1 (neon_cvt_mode_r);
}
17809
static void
do_vrinta (void)
{
  /* VRINTA: round to integral, to nearest with ties away from zero.  */
  do_vrint_1 (neon_cvt_mode_a);
}
17815
static void
do_vrintn (void)
{
  /* VRINTN: round to integral, to nearest with ties to even.  */
  do_vrint_1 (neon_cvt_mode_n);
}
17821
static void
do_vrintp (void)
{
  /* VRINTP: round to integral, towards plus infinity.  */
  do_vrint_1 (neon_cvt_mode_p);
}
17827
static void
do_vrintm (void)
{
  /* VRINTM: round to integral, towards minus infinity.  */
  do_vrint_1 (neon_cvt_mode_m);
}
17833
/* Pack the register number and element index of a VCMLA scalar operand
   OPND into encoding form (REG | (INDEX << 4)), diagnosing combinations
   that are out of range for element size ELSIZE.  */
static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (opnd);
  unsigned idx = NEON_SCALAR_INDEX (opnd);

  switch (elsize)
    {
    case 16:
      /* Half precision: two elements per 32 bits, registers 0-15 only.  */
      if (idx < 2 && reg < 16)
	return reg | (idx << 4);
      break;

    case 32:
      /* Single precision: only element 0 is encodable.  */
      if (idx == 0)
	return reg;
      break;

    default:
      break;
    }

  first_error (_("scalar out of range"));
  return 0;
}
17848
static void
do_vcmla (void)
{
  /* VCMLA: complex multiply-accumulate with rotation.  Requires the
     ARMv8 Advanced SIMD extension.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  /* Only rotations of 0, 90, 180 and 270 degrees exist; encoded 0..3.  */
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));
  rot /= 90;
  if (inst.operands[2].isscalar)
    {
      /* Indexed (by-element) form.  */
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      /* Vector form: reuse the generic three-same encoder, then patch
	 in the VCMLA-specific top byte, rotation and size bits.  */
      enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc200800;
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
17890
static void
do_vcadd (void)
{
  /* VCADD: complex add with rotation.  Requires the ARMv8 Advanced
     SIMD extension.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  /* Unlike VCMLA, only the 90 and 270 degree rotations exist.  */
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				   N_KEY | N_F16 | N_F32).size;
  /* Reuse the generic three-same encoder, then patch in the
     VCADD-specific top byte, rotation bit and size bit.  */
  neon_three_same (neon_quad (rs), 0, -1);
  inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
  inst.instruction |= 0xfc800800;
  inst.instruction |= (rot == 270) << 24;
  inst.instruction |= (size == 32) << 20;
}
17909
17910 /* Dot Product instructions encoding support. */
17911
static void
do_neon_dotproduct (int unsigned_p)
{
  /* Shared encoder for VSDOT/VUDOT; UNSIGNED_P selects the unsigned
     variant via the 'U' bit.  */
  enum neon_shape rs;
  unsigned scalar_oprd2 = 0;
  int high8;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
	       "is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* Dot Product instructions are in three-same D/Q register format or the third
     operand can be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
      high8 = 0xfe000000;
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
    }

  /* Element type is always 8-bit; signedness must match the mnemonic.  */
  if (unsigned_p)
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
  else
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);

  /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
     Product instruction, so we pass 0 as the "ubit" parameter.  And the
     "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter.  */
  neon_three_same (neon_quad (rs), 0, 32);

  /* Undo neon_dp_fixup.  Dot Product instructions are using a slightly
     different NEON three-same encoding.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;
  /* Encode 'U' bit which indicates signedness.  */
  inst.instruction |= (unsigned_p ? 1 : 0) << 4;
  /* Re-encode operand2 if it's indexed scalar operand.  What has been encoded
     from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
     the instruction encoding.  */
  if (inst.operands[2].isscalar)
    {
      inst.instruction &= 0xffffffd0;
      inst.instruction |= LOW4 (scalar_oprd2);
      inst.instruction |= HI1 (scalar_oprd2) << 5;
    }
}
17966
17967 /* Dot Product instructions for signed integer. */
17968
static void
do_neon_dotproduct_s (void)
{
  /* VSDOT: signed variant (unsigned_p == 0).  */
  return do_neon_dotproduct (0);
}
17974
17975 /* Dot Product instructions for unsigned integer. */
17976
static void
do_neon_dotproduct_u (void)
{
  /* VUDOT: unsigned variant (unsigned_p == 1).  */
  return do_neon_dotproduct (1);
}
17982
17983 /* Crypto v1 instructions. */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  /* Shared encoder for two-operand crypto instructions (AES*, SHA1H,
     SHA*SU0/1).  ELTTYPE is the required element type; OP selects the
     sub-opcode in bits [7:6], or -1 to leave those bits untouched.  */
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  /* Clear any diagnostic left behind by the type check.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  /* Top byte differs between the Thumb and ARM encodings.  */
  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
18008
static void
do_crypto_3op_1 (int u, int op)
{
  /* Shared encoder for three-operand crypto instructions (SHA1C/P/M,
     SHA1SU0, SHA256*).  U and OP select the instruction within the
     three-same crypto group.  */
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  /* Clear any diagnostic left behind by the type check.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
18023
static void
do_aese (void)
{
  /* AESE: 8-bit elements, sub-opcode 0.  */
  do_crypto_2op_1 (N_8, 0);
}
18029
static void
do_aesd (void)
{
  /* AESD: 8-bit elements, sub-opcode 1.  */
  do_crypto_2op_1 (N_8, 1);
}
18035
static void
do_aesmc (void)
{
  /* AESMC: 8-bit elements, sub-opcode 2.  */
  do_crypto_2op_1 (N_8, 2);
}
18041
static void
do_aesimc (void)
{
  /* AESIMC: 8-bit elements, sub-opcode 3.  */
  do_crypto_2op_1 (N_8, 3);
}
18047
static void
do_sha1c (void)
{
  /* SHA1C: u == 0, op == 0.  */
  do_crypto_3op_1 (0, 0);
}
18053
static void
do_sha1p (void)
{
  /* SHA1P: u == 0, op == 1.  */
  do_crypto_3op_1 (0, 1);
}
18059
static void
do_sha1m (void)
{
  /* SHA1M: u == 0, op == 2.  */
  do_crypto_3op_1 (0, 2);
}
18065
static void
do_sha1su0 (void)
{
  /* SHA1SU0: u == 0, op == 3.  */
  do_crypto_3op_1 (0, 3);
}
18071
static void
do_sha256h (void)
{
  /* SHA256H: u == 1, op == 0.  */
  do_crypto_3op_1 (1, 0);
}
18077
static void
do_sha256h2 (void)
{
  /* SHA256H2: u == 1, op == 1.  */
  do_crypto_3op_1 (1, 1);
}
18083
static void
do_sha256su1 (void)
{
  /* SHA256SU1: u == 1, op == 2.  */
  do_crypto_3op_1 (1, 2);
}
18089
static void
do_sha1h (void)
{
  /* SHA1H: 32-bit elements; -1 leaves the sub-opcode bits untouched.  */
  do_crypto_2op_1 (N_32, -1);
}
18095
static void
do_sha1su1 (void)
{
  /* SHA1SU1: 32-bit elements, sub-opcode 0.  */
  do_crypto_2op_1 (N_32, 0);
}
18101
static void
do_sha256su0 (void)
{
  /* SHA256SU0: 32-bit elements, sub-opcode 1.  */
  do_crypto_2op_1 (N_32, 1);
}
18107
static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  /* Shared encoder for the CRC32 family.  POLY selects the polynomial
     (0: CRC32, 1: CRC32C); SZ encodes the operand size (0: byte,
     1: halfword, 2: word) — see the do_crc32* wrappers below.  */
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_it_insn_type (OUTSIDE_IT_INSN);
  /* Field positions differ between the ARM and Thumb encodings.  */
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  /* Use of r15 in any operand is unpredictable.  */
  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
}
18125
static void
do_crc32b (void)
{
  /* CRC32B: CRC32 polynomial, byte operand.  */
  do_crc32_1 (0, 0);
}
18131
static void
do_crc32h (void)
{
  /* CRC32H: CRC32 polynomial, halfword operand.  */
  do_crc32_1 (0, 1);
}
18137
static void
do_crc32w (void)
{
  /* CRC32W: CRC32 polynomial, word operand.  */
  do_crc32_1 (0, 2);
}
18143
static void
do_crc32cb (void)
{
  /* CRC32CB: CRC32C polynomial, byte operand.  */
  do_crc32_1 (1, 0);
}
18149
static void
do_crc32ch (void)
{
  /* CRC32CH: CRC32C polynomial, halfword operand.  */
  do_crc32_1 (1, 1);
}
18155
static void
do_crc32cw (void)
{
  /* CRC32CW: CRC32C polynomial, word operand.  */
  do_crc32_1 (1, 2);
}
18161
static void
do_vjcvt (void)
{
  /* VJCVT: convert double precision to signed 32-bit integer.
     Requires the ARMv8 VFP extension.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
18171
18172 \f
18173 /* Overall per-instruction processing. */
18174
18175 /* We need to be able to fix up arbitrary expressions in some statements.
18176 This is so that we can handle symbols that are an arbitrary distance from
18177 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
18178 which returns part of an address in a form which will be valid for
18179 a data instruction. We do this by pushing the expression into a symbol
18180 in the expr_section, and creating a fix for that. */
18181
static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  /* Create a fix for expression EXP of SIZE bytes at offset WHERE in
     FRAG, using relocation type RELOC.  Complex expressions are routed
     through make_expr_symbol; pc-relative constants get a synthetic
     absolute symbol so the object file has something to refer to.  */
  fixS *	   new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Arbitrary expression: push it into a symbol and fix that.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
18235
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  /* Emit the current (Thumb) instruction into a relaxable machine-
     dependent frag, splitting its operand expression into a symbol
     plus offset as frag_var requires.  */
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  switch (inst.relocs[0].exp.X_op)
    {
    case O_symbol:
      sym = inst.relocs[0].exp.X_add_symbol;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    default:
      /* Complex expression: wrap it in an expression symbol.  */
      sym = make_expr_symbol (&inst.relocs[0].exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
18267
18268 /* Write a 32-bit thumb instruction to buf. */
18269 static void
18270 put_thumb32_insn (char * buf, unsigned long insn)
18271 {
18272 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
18273 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
18274 }
18275
static void
output_inst (const char * str)
{
  /* Emit the fully-encoded current instruction (in `inst') to the
     output, report any pending diagnostic against the source text STR,
     and create fixes for any relocations the operands required.  */
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      /* Size not yet known; goes through the relaxation machinery.  */
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb: two halfwords, high halfword first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Double-length ARM encoding: the same word emitted twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    {
      if (inst.relocs[r].type != BFD_RELOC_UNUSED)
	fix_new_arm (frag_now, to - frag_now->fr_literal,
		     inst.size, & inst.relocs[r].exp, inst.relocs[r].pc_rel,
		     inst.relocs[r].type);
    }

  dwarf2_emit_insn (inst.size);
}
18326
18327 static char *
18328 output_it_inst (int cond, int mask, char * to)
18329 {
18330 unsigned long instruction = 0xbf00;
18331
18332 mask &= 0xf;
18333 instruction |= mask;
18334 instruction |= cond << 4;
18335
18336 if (to == NULL)
18337 {
18338 to = frag_more (2);
18339 #ifdef OBJ_ELF
18340 dwarf2_emit_insn (2);
18341 #endif
18342 }
18343
18344 md_number_to_chars (to, instruction, 2);
18345
18346 return to;
18347 }
18348
18349 /* Tag values used in struct asm_opcode's tag field. */
enum opcode_tag
{
  /* Interpreted by opcode_lookup () to decide where, if anywhere, a
     conditional affix may appear in a mnemonic.  */
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
18382
18383 /* Subroutine of md_assemble, responsible for looking up the primary
18384 opcode from the mnemonic the user wrote. STR points to the
18385 beginning of the mnemonic.
18386
18387 This is not simply a hash table lookup, because of conditional
18388 variants. Most instructions have conditional variants, which are
18389 expressed with a _conditional affix_ to the mnemonic. If we were
18390 to encode each conditional variant as a literal string in the opcode
18391 table, it would have approximately 20,000 entries.
18392
18393 Most mnemonics take this affix as a suffix, and in unified syntax,
18394 'most' is upgraded to 'all'. However, in the divided syntax, some
18395 instructions take the affix as an infix, notably the s-variants of
18396 the arithmetic instructions. Of those instructions, all but six
18397 have the infix appear after the third character of the mnemonic.
18398
18399 Accordingly, the algorithm for looking up primary opcodes given
18400 an identifier is:
18401
18402 1. Look up the identifier in the opcode table.
18403 If we find a match, go to step U.
18404
18405 2. Look up the last two characters of the identifier in the
18406 conditions table. If we find a match, look up the first N-2
18407 characters of the identifier in the opcode table. If we
18408 find a match, go to step CE.
18409
18410 3. Look up the fourth and fifth characters of the identifier in
18411 the conditions table. If we find a match, extract those
18412 characters from the identifier, and look up the remaining
18413 characters in the opcode table. If we find a match, go
18414 to step CM.
18415
18416 4. Fail.
18417
18418 U. Examine the tag field of the opcode structure, in case this is
18419 one of the six instructions with its conditional infix in an
18420 unusual place. If it is, the tag tells us where to find the
18421 infix; look it up in the conditions table and set inst.cond
18422 accordingly. Otherwise, this is an unconditional instruction.
18423 Again set inst.cond accordingly. Return the opcode structure.
18424
18425 CE. Examine the tag field to make sure this is an instruction that
18426 should receive a conditional suffix. If it is not, fail.
18427 Otherwise, set inst.cond from the suffix we already looked up,
18428 and return the opcode structure.
18429
18430 CM. Examine the tag field to make sure this is an instruction that
18431 should receive a conditional infix after the third character.
18432 If it is not, fail. Otherwise, undo the edits to the current
18433 line of input and proceed as for case CE. */
18434
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      /* Infix at an unusual position: the tag encodes where.  */
      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a two-character conditional suffix on a mnemonic of
     fewer than three characters (there must be at least one character
     of base mnemonic before the suffix).  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return NULL;
	  /* Fall through.  */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily excise the would-be infix from the input line, look up
     the remaining characters, then restore the line unchanged.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
18591
18592 /* This function generates an initial IT instruction, leaving its block
18593 virtually open for the new instructions. Eventually,
18594 the mask will be updated by now_it_add_mask () each time
18595 a new instruction needs to be included in the IT block.
18596 Finally, the block is closed with close_automatic_it_block ().
18597 The block closure can be requested either from md_assemble (),
18598 a tencode (), or due to a label hook. */
18599
static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  now_it.mask = 0x18;	/* Initial mask for a block of length one;
			   extended later by now_it_add_mask ().  */
  now_it.cc = cond;
  now_it.block_length = 1;
  mapping_state (MAP_THUMB);
  /* Emit the IT instruction now and remember where, so its mask can be
     rewritten in place as the block grows.  */
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
  now_it.warn_deprecated = FALSE;
  now_it.insn_cond = TRUE;
}
18612
18613 /* Close an automatic IT block.
18614 See comments in new_automatic_it_block (). */
18615
static void
close_automatic_it_block (void)
{
  /* Reset the mask and length so no further instructions are added to
     the previously-open block.  */
  now_it.mask = 0x10;
  now_it.block_length = 0;
}
18622
18623 /* Update the mask of the current automatically-generated IT
18624 instruction. See comments in new_automatic_it_block (). */
18625
18626 static void
18627 now_it_add_mask (int cond)
18628 {
18629 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
18630 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
18631 | ((bitvalue) << (nbit)))
18632 const int resulting_bit = (cond & 1);
18633
18634 now_it.mask &= 0xf;
18635 now_it.mask = SET_BIT_VALUE (now_it.mask,
18636 resulting_bit,
18637 (5 - now_it.block_length));
18638 now_it.mask = SET_BIT_VALUE (now_it.mask,
18639 1,
18640 ((5 - now_it.block_length) - 1) );
18641 output_it_inst (now_it.cc, now_it.mask, now_it.insn);
18642
18643 #undef CLEAR_BIT
18644 #undef SET_BIT_VALUE
18645 }
18646
/* The IT blocks handling machinery is accessed through these functions:
18648 it_fsm_pre_encode () from md_assemble ()
18649 set_it_insn_type () optional, from the tencode functions
18650 set_it_insn_type_last () ditto
18651 in_it_block () ditto
18652 it_fsm_post_encode () from md_assemble ()
18653 force_automatic_it_block_close () from label handling functions
18654
18655 Rationale:
18656 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
18657 initializing the IT insn type with a generic initial value depending
18658 on the inst.condition.
18659 2) During the tencode function, two things may happen:
18660 a) The tencode function overrides the IT insn type by
18661 calling either set_it_insn_type (type) or set_it_insn_type_last ().
18662 b) The tencode function queries the IT block state by
18663 calling in_it_block () (i.e. to determine narrow/not narrow mode).
18664
18665 Both set_it_insn_type and in_it_block run the internal FSM state
18666 handling function (handle_it_state), because: a) setting the IT insn
   type may result in an invalid state (exiting the function),
18668 and b) querying the state requires the FSM to be updated.
18669 Specifically we want to avoid creating an IT block for conditional
18670 branches, so it_fsm_pre_encode is actually a guess and we can't
18671 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
18673 Because of this, if set_it_insn_type and in_it_block have to be used,
18674 set_it_insn_type has to be called first.
18675
18676 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
18677 determines the insn IT type depending on the inst.cond code.
18678 When a tencode () routine encodes an instruction that can be
18679 either outside an IT block, or, in the case of being inside, has to be
18680 the last one, set_it_insn_type_last () will determine the proper
18681 IT instruction type based on the inst.cond code. Otherwise,
18682 set_it_insn_type can be called for overriding that logic or
18683 for covering other cases.
18684
18685 Calling handle_it_state () may not transition the IT block state to
18686 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
18687 still queried. Instead, if the FSM determines that the state should
18688 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
18689 after the tencode () function: that's what it_fsm_post_encode () does.
18690
18691 Since in_it_block () calls the state handling function to get an
18692 updated state, an error may occur (due to invalid insns combination).
18693 In that case, inst.error is set.
18694 Therefore, inst.error has to be checked after the execution of
18695 the tencode () routine.
18696
18697 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
18698 any pending state change (if any) that didn't take place in
18699 handle_it_state () as explained above. */
18700
18701 static void
18702 it_fsm_pre_encode (void)
18703 {
18704 if (inst.cond != COND_ALWAYS)
18705 inst.it_insn_type = INSIDE_IT_INSN;
18706 else
18707 inst.it_insn_type = OUTSIDE_IT_INSN;
18708
18709 now_it.state_handled = 0;
18710 }
18711
/* IT state FSM handling function.  Advance the IT-block state machine
   (now_it) according to the type of the instruction being assembled
   (inst.it_insn_type).  Returns SUCCESS, or FAIL with inst.error set
   when the instruction is illegal in the current IT context.  */

static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      /* ARM mode: conditional execution needs no IT block, but
		 warn when implicit-IT handling for ARM is not enabled.  */
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  /* Conditional instruction with no enclosing IT block
		     and implicit IT generation disabled.  */
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction opens a manual block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      /* Block is full or the condition no longer fits: close it
		 and, unless this insn must be last, open a fresh one.  */
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  COND is the condition this slot
	   of the explicit IT instruction imposes, derived from the
	   block's condition code and the top bit of the mask.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	/* Consume one slot of the IT mask.  */
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		/* Suffix does not match the slot's IT condition.  */
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		/* This insn kind is only legal as the final slot.  */
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    /* IT inside an IT block is not allowed.  */
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
18875
/* A pattern/mask pair describing a class of instructions that is
   deprecated inside an IT block; matched in it_fsm_post_encode ()
   as (inst.instruction & mask) == pattern.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Expected bits after masking.  */
  unsigned long mask;		/* Bits of the encoding to compare.  */
  const char* description;	/* Human-readable class name for the warning.  */
};

/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Terminated by a zero-mask entry (the matching loop stops
   when mask == 0).  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode. 'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
18897
/* Run after tencode ()/aencode (): commit any pending IT state change,
   emit ARMv8 performance-deprecation warnings for instructions inside
   IT blocks, and leave the IT block when its last slot was consumed.  */

static void
it_fsm_post_encode (void)
{
  int is_last;

  if (!now_it.state_handled)
    handle_it_state ();

  /* Deprecation warnings apply only once per block, only when the
     target is ARMv8-A/R (v8 and not M profile).  */
  if (now_it.insn_cond
      && !now_it.warn_deprecated
      && warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
      && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
    {
      if (inst.instruction >= 0x10000)
	{
	  /* 32-bit Thumb encoding.  */
	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
		       "performance deprecated in ARMv8-A and ARMv8-R"));
	  now_it.warn_deprecated = TRUE;
	}
      else
	{
	  /* 16-bit encoding: check against the deprecated classes.  */
	  const struct depr_insn_mask *p = depr_it_insns;

	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_tsktsk (_("IT blocks containing 16-bit Thumb "
			       "instructions of the following class are "
			       "performance deprecated in ARMv8-A and "
			       "ARMv8-R: %s"), p->description);
		  now_it.warn_deprecated = TRUE;
		  break;
		}

	      ++p;
	    }
	}

      if (now_it.block_length > 1)
	{
	  as_tsktsk (_("IT blocks containing more than one conditional "
		       "instruction are performance deprecated in ARMv8-A and "
		       "ARMv8-R"));
	  now_it.warn_deprecated = TRUE;
	}
    }

  /* A mask of 0x10 means every slot of the IT block has been used.  */
  is_last = (now_it.mask == 0x10);
  if (is_last)
    {
      now_it.state = OUTSIDE_IT_BLOCK;
      now_it.mask = 0;
    }
}
18954
18955 static void
18956 force_automatic_it_block_close (void)
18957 {
18958 if (now_it.state == AUTOMATIC_IT_BLOCK)
18959 {
18960 close_automatic_it_block ();
18961 now_it.state = OUTSIDE_IT_BLOCK;
18962 now_it.mask = 0;
18963 }
18964 }
18965
18966 static int
18967 in_it_block (void)
18968 {
18969 if (!now_it.state_handled)
18970 handle_it_state ();
18971
18972 return now_it.state != OUTSIDE_IT_BLOCK;
18973 }
18974
18975 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18976 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18977 here, hence the "known" in the function name. */
18978
18979 static bfd_boolean
18980 known_t32_only_insn (const struct asm_opcode *opcode)
18981 {
18982 /* Original Thumb-1 wide instruction. */
18983 if (opcode->tencode == do_t_blx
18984 || opcode->tencode == do_t_branch23
18985 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
18986 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
18987 return TRUE;
18988
18989 /* Wide-only instruction added to ARMv8-M Baseline. */
18990 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
18991 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
18992 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
18993 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
18994 return TRUE;
18995
18996 return FALSE;
18997 }
18998
18999 /* Whether wide instruction variant can be used if available for a valid OPCODE
19000 in ARCH. */
19001
19002 static bfd_boolean
19003 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
19004 {
19005 if (known_t32_only_insn (opcode))
19006 return TRUE;
19007
19008 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
19009 of variant T3 of B.W is checked in do_t_branch. */
19010 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
19011 && opcode->tencode == do_t_branch)
19012 return TRUE;
19013
19014 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
19015 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
19016 && opcode->tencode == do_t_mov_cmp
19017 /* Make sure CMP instruction is not affected. */
19018 && opcode->aencode == do_mov)
19019 return TRUE;
19020
19021 /* Wide instruction variants of all instructions with narrow *and* wide
19022 variants become available with ARMv6t2. Other opcodes are either
19023 narrow-only or wide-only and are thus available if OPCODE is valid. */
19024 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
19025 return TRUE;
19026
19027 /* OPCODE with narrow only instruction variant or wide variant not
19028 available. */
19029 return FALSE;
19030 }
19031
/* Main entry point for assembling one source line STR.  Looks up the
   opcode, validates it against the selected CPU/mode, parses operands,
   drives the IT-block FSM around the per-opcode encoder, records which
   architecture features were used, and finally emits the instruction.  */

void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Start from a clean per-instruction state.  */
  memset (&inst, '\0', sizeof (inst));
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    inst.relocs[r].type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	 This avoids relaxation accidentally introducing Thumb-2
	 instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 0xe800..0xffff would be an incomplete 32-bit encoding.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
19226
/* Warn at end of assembly if any section (ELF) or the file (non-ELF)
   still has an explicit IT block open.  */

static void
check_it_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  /* IT state is tracked per-section on ELF; check each one.  */
  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_it.state
	== MANUAL_IT_BLOCK)
      {
	as_warn (_("section '%s' finished with an open IT block."),
		 sect->name);
      }
#else
  if (now_it.state == MANUAL_IT_BLOCK)
    as_warn (_("file finished with an open IT block."));
#endif
}
19245
19246 /* Various frobbings of labels and their addresses. */
19247
/* Called at the start of every source line: forget the previously seen
   label so it is not re-aligned by the next md_assemble ().  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
19253
/* Hook run for every label SYM: record it, tag it with the current
   Thumb/interwork state, close any automatic IT block, and optionally
   mark it as a Thumb function for interworking.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  /* Record whether the label was defined in Thumb or ARM code.  */
  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any automatically-generated IT block.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
19312
19313 bfd_boolean
19314 arm_data_in_code (void)
19315 {
19316 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
19317 {
19318 *input_line_pointer = '/';
19319 input_line_pointer += 5;
19320 *input_line_pointer = 0;
19321 return TRUE;
19322 }
19323
19324 return FALSE;
19325 }
19326
19327 char *
19328 arm_canonicalize_symbol_name (char * name)
19329 {
19330 int len;
19331
19332 if (thumb_mode && (len = strlen (name)) > 5
19333 && streq (name + len - 5, "/data"))
19334 *(name + len - 5) = 0;
19335
19336 return name;
19337 }
19338 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* Helper macros for building reg_names entries; #undef'd after the
   table.  REGSET expands to registers 0-15, REGSETH to 16-31, and
   REGSET2 doubles the number (for Q registers aliasing D pairs).  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)

static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  NOTE(review): the 512/768 bias and the
     value shifted into bits 16+ encode banked-register selection for
     MRS/MSR; confirm the exact field layout against the reg_entry
     consumers before relying on it.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.	 */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
  REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
#undef REGDEF
#undef REGNUM
#undef REGSET
19488
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Every ordering of each 2-, 3- and 4-letter
   combination of the f/s/x/c field letters is listed explicitly, so the
   suffix letters may be written in any order.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
19567
19568 /* Table of V7M psr names. */
19569 static const struct asm_psr v7m_psrs[] =
19570 {
19571 {"apsr", 0x0 }, {"APSR", 0x0 },
19572 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
19573 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
19574 {"psr", 0x3 }, {"PSR", 0x3 },
19575 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
19576 {"ipsr", 0x5 }, {"IPSR", 0x5 },
19577 {"epsr", 0x6 }, {"EPSR", 0x6 },
19578 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
19579 {"msp", 0x8 }, {"MSP", 0x8 },
19580 {"psp", 0x9 }, {"PSP", 0x9 },
19581 {"msplim", 0xa }, {"MSPLIM", 0xa },
19582 {"psplim", 0xb }, {"PSPLIM", 0xb },
19583 {"primask", 0x10}, {"PRIMASK", 0x10},
19584 {"basepri", 0x11}, {"BASEPRI", 0x11},
19585 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
19586 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
19587 {"control", 0x14}, {"CONTROL", 0x14},
19588 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
19589 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
19590 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
19591 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
19592 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
19593 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
19594 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
19595 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
19596 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
19597 };
19598
/* Table of all shift-in-operand names.  Each name appears in both lower
   and upper case.  "asl"/"ASL" are accepted as aliases of LSL (both map
   to SHIFT_LSL).  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
19609
/* Table of all explicit relocation names, i.e. the NAME in expressions of
   the form sym(NAME).  Each specifier is entered in both lower and upper
   case, mapping to the same BFD relocation.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
  { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
  { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC },
  { "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC },
  { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "funcdesc", BFD_RELOC_ARM_FUNCDESC },
  { "FUNCDESC", BFD_RELOC_ARM_FUNCDESC },
  { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC },   { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC },
  { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC }, { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
  /* Fixed: the upper-case name was misspelled "GOTTPOFF_FDIC", which made
     the upper-case form of this specifier unrecognizable.  */
  { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC }, { "GOTTPOFF_FDPIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
};
#endif
19643
/* Table of all conditional affixes.  0xF is not defined as a condition code.
   Several conditions have synonymous spellings: cs/hs both encode 0x2 and
   cc/ul/lo all encode 0x3.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  /* NOTE(review): "ul" as a third alias for 0x3 alongside cc/lo is
     unusual -- confirm it is intentional against the Arm ARM.  */
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
19663
/* Enter a barrier option under both its lower-case (L) and upper-case (U)
   spelling, with the same encoding CODE and required core feature FEAT.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Table of memory barrier option names and their 4-bit encodings.  The
   load-only variants (ld/ishld/nshld/oshld) require ARMv8; all others only
   require the barrier extension.  Note the synonym pairs sh/ish, un/nsh,
   shst/ishst and unst/nshst, which share encodings.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
19689
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.
   OPS0 must spell OP_stop explicitly because it initializes no
   elements at all.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
19714
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  Each expansion produces one table entry
   laid out as:
     { mnemonic, operand list, suffix/infix style, ARM opcode, Thumb opcode,
       ARM feature set, Thumb feature set, ARM encoder, Thumb encoder }
   where the feature sets come from the ARM_VARIANT / THUMB_VARIANT macros
   in effect at the point of use.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* As TxC3, but the infixed form is deprecated (OT_cinfix3_deprecated).  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }
19760
/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  Note CE takes the mnemonic as a
   string literal whereas C3 takes a bare identifier and stringizes it.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Thumb-only variants of TCE and TUE.  */
#define ToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te }

#define ToU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
    NULL, do_##te }

/* T_MNEM_xyz enumerator variants of ToC.  */
#define toC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
    do_##te }

/* T_MNEM_xyz enumerator variants of ToU.  */
#define toU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
    NULL, do_##te }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19798
/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  The Thumb
   opcode is the ARM opcode with 0xe pasted in front (the always condition).  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Build one entry of an odd-infix mnemonic: M1 is the part before the
   condition affix M2, M3 the part after it.  An empty M2 (sizeof (#m2) == 1,
   i.e. just the NUL) marks the unconditional form; otherwise the tag records
   the affix position as OT_odd_infix_0 plus the length of M1.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand an odd-infix mnemonic once for the bare form and once per
   condition-code spelling.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

/* ARM-only unconditional mnemonics; UF is the 0xF-condition-field
   (OT_unconditionalF) form, like TUF without a Thumb variant.  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
19847
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  The opcode field holds a
   N_MNEM_xyz enumerator rather than a literal encoding.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  TAG selects the suffix style (plain or F-suffixed).  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Lets a table entry pass 0 as its encoder argument: do_##0 pastes to
   do_0, which expands to 0, i.e. a null encoding-function pointer.  */
#define do_0 0
19885
19886 static const struct asm_opcode insns[] =
19887 {
19888 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
19889 #define THUMB_VARIANT & arm_ext_v4t
19890 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
19891 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
19892 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
19893 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
19894 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
19895 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
19896 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
19897 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
19898 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
19899 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
19900 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
19901 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
19902 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
19903 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
19904 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
19905 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
19906
19907 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19908 for setting PSR flag bits. They are obsolete in V6 and do not
19909 have Thumb equivalents. */
19910 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19911 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19912 CL("tstp", 110f000, 2, (RR, SH), cmp),
19913 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19914 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19915 CL("cmpp", 150f000, 2, (RR, SH), cmp),
19916 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19917 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19918 CL("cmnp", 170f000, 2, (RR, SH), cmp),
19919
19920 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
19921 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
19922 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
19923 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
19924
19925 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
19926 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19927 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
19928 OP_RRnpc),
19929 OP_ADDRGLDR),ldst, t_ldst),
19930 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19931
19932 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19933 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19934 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19935 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19936 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19937 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19938
19939 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
19940 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
19941
19942 /* Pseudo ops. */
19943 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
19944 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
19945 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
19946 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
19947
19948 /* Thumb-compatibility pseudo ops. */
19949 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
19950 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
19951 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
19952 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
19953 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
19954 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
19955 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
19956 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
19957 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
19958 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
19959 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
19960 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
19961
19962 /* These may simplify to neg. */
19963 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
19964 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
19965
19966 #undef THUMB_VARIANT
19967 #define THUMB_VARIANT & arm_ext_os
19968
19969 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
19970 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
19971
19972 #undef THUMB_VARIANT
19973 #define THUMB_VARIANT & arm_ext_v6
19974
19975 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
19976
19977 /* V1 instructions with no Thumb analogue prior to V6T2. */
19978 #undef THUMB_VARIANT
19979 #define THUMB_VARIANT & arm_ext_v6t2
19980
19981 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19982 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19983 CL("teqp", 130f000, 2, (RR, SH), cmp),
19984
19985 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19986 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19987 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
19988 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19989
19990 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19991 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19992
19993 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19994 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19995
19996 /* V1 instructions with no Thumb analogue at all. */
19997 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
19998 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
19999
20000 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
20001 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
20002 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
20003 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
20004 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
20005 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
20006 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
20007 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
20008
20009 #undef ARM_VARIANT
20010 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
20011 #undef THUMB_VARIANT
20012 #define THUMB_VARIANT & arm_ext_v4t
20013
20014 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
20015 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
20016
20017 #undef THUMB_VARIANT
20018 #define THUMB_VARIANT & arm_ext_v6t2
20019
20020 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
20021 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
20022
20023 /* Generic coprocessor instructions. */
20024 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
20025 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20026 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20027 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20028 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20029 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
20030 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
20031
20032 #undef ARM_VARIANT
20033 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
20034
20035 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
20036 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
20037
20038 #undef ARM_VARIANT
20039 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
20040 #undef THUMB_VARIANT
20041 #define THUMB_VARIANT & arm_ext_msr
20042
20043 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
20044 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
20045
20046 #undef ARM_VARIANT
20047 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
20048 #undef THUMB_VARIANT
20049 #define THUMB_VARIANT & arm_ext_v6t2
20050
20051 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
20052 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
20053 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
20054 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
20055 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
20056 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
20057 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
20058 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
20059
20060 #undef ARM_VARIANT
20061 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
20062 #undef THUMB_VARIANT
20063 #define THUMB_VARIANT & arm_ext_v4t
20064
20065 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
20066 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
20067 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
20068 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
20069 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
20070 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
20071
20072 #undef ARM_VARIANT
20073 #define ARM_VARIANT & arm_ext_v4t_5
20074
20075 /* ARM Architecture 4T. */
20076 /* Note: bx (and blx) are required on V5, even if the processor does
20077 not support Thumb. */
20078 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
20079
20080 #undef ARM_VARIANT
20081 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
20082 #undef THUMB_VARIANT
20083 #define THUMB_VARIANT & arm_ext_v5t
20084
20085 /* Note: blx has 2 variants; the .value coded here is for
20086 BLX(2). Only this variant has conditional execution. */
20087 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
20088 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
20089
20090 #undef THUMB_VARIANT
20091 #define THUMB_VARIANT & arm_ext_v6t2
20092
20093 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
20094 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20095 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20096 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20097 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20098 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
20099 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
20100 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
20101
20102 #undef ARM_VARIANT
20103 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
20104 #undef THUMB_VARIANT
20105 #define THUMB_VARIANT & arm_ext_v5exp
20106
20107 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
20108 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
20109 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
20110 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
20111
20112 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
20113 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
20114
20115 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
20116 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
20117 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
20118 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
20119
20120 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20121 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20122 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20123 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20124
20125 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20126 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20127
20128 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
20129 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
20130 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
20131 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
20132
20133 #undef ARM_VARIANT
20134 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
20135 #undef THUMB_VARIANT
20136 #define THUMB_VARIANT & arm_ext_v6t2
20137
20138 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
20139 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
20140 ldrd, t_ldstd),
20141 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
20142 ADDRGLDRS), ldrd, t_ldstd),
20143
20144 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
20145 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
20146
20147 #undef ARM_VARIANT
20148 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
20149
20150 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
20151
20152 #undef ARM_VARIANT
20153 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
20154 #undef THUMB_VARIANT
20155 #define THUMB_VARIANT & arm_ext_v6
20156
20157 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
20158 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
20159 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
20160 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
20161 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
20162 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
20163 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
20164 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
20165 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
20166 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
20167
20168 #undef THUMB_VARIANT
20169 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20170
20171 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
20172 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
20173 strex, t_strex),
20174 #undef THUMB_VARIANT
20175 #define THUMB_VARIANT & arm_ext_v6t2
20176
20177 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
20178 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
20179
20180 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
20181 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
20182
20183 /* ARM V6 not included in V7M. */
20184 #undef THUMB_VARIANT
20185 #define THUMB_VARIANT & arm_ext_v6_notm
20186 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
20187 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
20188 UF(rfeib, 9900a00, 1, (RRw), rfe),
20189 UF(rfeda, 8100a00, 1, (RRw), rfe),
20190 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
20191 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
20192 UF(rfefa, 8100a00, 1, (RRw), rfe),
20193 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
20194 UF(rfeed, 9900a00, 1, (RRw), rfe),
20195 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
20196 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
20197 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
20198 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
20199 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
20200 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
20201 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
20202 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
20203 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
20204 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
20205
20206 /* ARM V6 not included in V7M (eg. integer SIMD). */
20207 #undef THUMB_VARIANT
20208 #define THUMB_VARIANT & arm_ext_v6_dsp
20209 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
20210 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
20211 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20212 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20213 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20214 /* Old name for QASX. */
20215 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20216 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20217 /* Old name for QSAX. */
20218 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20219 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20220 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20221 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20222 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20223 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20224 /* Old name for SASX. */
20225 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20226 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20227 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20228 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20229 /* Old name for SHASX. */
20230 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20231 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20232 /* Old name for SHSAX. */
20233 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20234 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20235 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20236 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20237 /* Old name for SSAX. */
20238 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20239 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20240 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20241 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20242 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20243 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20244 /* Old name for UASX. */
20245 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20246 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20247 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20248 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20249 /* Old name for UHASX. */
20250 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20251 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20252 /* Old name for UHSAX. */
20253 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20254 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20255 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20256 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20257 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20258 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20259 /* Old name for UQASX. */
20260 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20261 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20262 /* Old name for UQSAX. */
20263 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20264 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20265 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20266 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20267 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20268 /* Old name for USAX. */
20269 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20270 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20271 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
20272 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
20273 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
20274 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
20275 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
20276 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
20277 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
20278 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
20279 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20280 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20281 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20282 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
20283 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
20284 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20285 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20286 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
20287 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
20288 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20289 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20290 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20291 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20292 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20293 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20294 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20295 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20296 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20297 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20298 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
20299 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
20300 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20301 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20302 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
20303
20304 #undef ARM_VARIANT
20305 #define ARM_VARIANT & arm_ext_v6k_v6t2
20306 #undef THUMB_VARIANT
20307 #define THUMB_VARIANT & arm_ext_v6k_v6t2
20308
20309 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
20310 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
20311 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
20312 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
20313
20314 #undef THUMB_VARIANT
20315 #define THUMB_VARIANT & arm_ext_v6_notm
20316 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
20317 ldrexd, t_ldrexd),
20318 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
20319 RRnpcb), strexd, t_strexd),
20320
20321 #undef THUMB_VARIANT
20322 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20323 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
20324 rd_rn, rd_rn),
20325 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
20326 rd_rn, rd_rn),
20327 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
20328 strex, t_strexbh),
20329 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
20330 strex, t_strexbh),
20331 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
20332
20333 #undef ARM_VARIANT
20334 #define ARM_VARIANT & arm_ext_sec
20335 #undef THUMB_VARIANT
20336 #define THUMB_VARIANT & arm_ext_sec
20337
20338 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
20339
20340 #undef ARM_VARIANT
20341 #define ARM_VARIANT & arm_ext_virt
20342 #undef THUMB_VARIANT
20343 #define THUMB_VARIANT & arm_ext_virt
20344
20345 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
20346 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
20347
20348 #undef ARM_VARIANT
20349 #define ARM_VARIANT & arm_ext_pan
20350 #undef THUMB_VARIANT
20351 #define THUMB_VARIANT & arm_ext_pan
20352
20353 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
20354
20355 #undef ARM_VARIANT
20356 #define ARM_VARIANT & arm_ext_v6t2
20357 #undef THUMB_VARIANT
20358 #define THUMB_VARIANT & arm_ext_v6t2
20359
20360 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
20361 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
20362 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
20363 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
20364
20365 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
20366 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
20367
20368 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20369 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20370 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20371 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20372
20373 #undef ARM_VARIANT
20374 #define ARM_VARIANT & arm_ext_v3
20375 #undef THUMB_VARIANT
20376 #define THUMB_VARIANT & arm_ext_v6t2
20377
20378 TUE("csdb", 320f014, f3af8014, 0, (), noargs, t_csdb),
20379 TUF("ssbb", 57ff040, f3bf8f40, 0, (), noargs, t_csdb),
20380 TUF("pssbb", 57ff044, f3bf8f44, 0, (), noargs, t_csdb),
20381
20382 #undef ARM_VARIANT
20383 #define ARM_VARIANT & arm_ext_v6t2
20384 #undef THUMB_VARIANT
20385 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20386 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
20387 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
20388
20389 /* Thumb-only instructions. */
20390 #undef ARM_VARIANT
20391 #define ARM_VARIANT NULL
20392 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
20393 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
20394
20395 /* ARM does not really have an IT instruction, so always allow it.
20396 The opcode is copied from Thumb in order to allow warnings in
20397 -mimplicit-it=[never | arm] modes. */
20398 #undef ARM_VARIANT
20399 #define ARM_VARIANT & arm_ext_v1
20400 #undef THUMB_VARIANT
20401 #define THUMB_VARIANT & arm_ext_v6t2
20402
20403 TUE("it", bf08, bf08, 1, (COND), it, t_it),
20404 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
20405 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
20406 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
20407 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
20408 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
20409 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
20410 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
20411 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
20412 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
20413 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
20414 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
20415 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
20416 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
20417 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
20418 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
20419 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
20420 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
20421
20422 /* Thumb2 only instructions. */
20423 #undef ARM_VARIANT
20424 #define ARM_VARIANT NULL
20425
20426 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
20427 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
20428 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
20429 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
20430 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
20431 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
20432
20433 /* Hardware division instructions. */
20434 #undef ARM_VARIANT
20435 #define ARM_VARIANT & arm_ext_adiv
20436 #undef THUMB_VARIANT
20437 #define THUMB_VARIANT & arm_ext_div
20438
20439 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
20440 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
20441
20442 /* ARM V6M/V7 instructions. */
20443 #undef ARM_VARIANT
20444 #define ARM_VARIANT & arm_ext_barrier
20445 #undef THUMB_VARIANT
20446 #define THUMB_VARIANT & arm_ext_barrier
20447
20448 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
20449 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
20450 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
20451
20452 /* ARM V7 instructions. */
20453 #undef ARM_VARIANT
20454 #define ARM_VARIANT & arm_ext_v7
20455 #undef THUMB_VARIANT
20456 #define THUMB_VARIANT & arm_ext_v7
20457
20458 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
20459 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
20460
20461 #undef ARM_VARIANT
20462 #define ARM_VARIANT & arm_ext_mp
20463 #undef THUMB_VARIANT
20464 #define THUMB_VARIANT & arm_ext_mp
20465
20466 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
20467
 /* ARMv8 instructions.  */
20469 #undef ARM_VARIANT
20470 #define ARM_VARIANT & arm_ext_v8
20471
20472 /* Instructions shared between armv8-a and armv8-m. */
20473 #undef THUMB_VARIANT
20474 #define THUMB_VARIANT & arm_ext_atomics
20475
20476 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20477 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20478 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20479 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20480 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20481 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20482 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20483 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
20484 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20485 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
20486 stlex, t_stlex),
20487 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
20488 stlex, t_stlex),
20489 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
20490 stlex, t_stlex),
20491 #undef THUMB_VARIANT
20492 #define THUMB_VARIANT & arm_ext_v8
20493
20494 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
20495 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
20496 ldrexd, t_ldrexd),
20497 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
20498 strexd, t_strexd),
20499
20500 /* Defined in V8 but is in undefined encoding space for earlier
20501 architectures. However earlier architectures are required to treat
     this instruction as a semihosting trap as well.  Hence while not explicitly
20503 defined as such, it is in fact correct to define the instruction for all
20504 architectures. */
20505 #undef THUMB_VARIANT
20506 #define THUMB_VARIANT & arm_ext_v1
20507 #undef ARM_VARIANT
20508 #define ARM_VARIANT & arm_ext_v1
20509 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
20510
20511 /* ARMv8 T32 only. */
20512 #undef ARM_VARIANT
20513 #define ARM_VARIANT NULL
20514 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
20515 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
20516 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
20517
20518 /* FP for ARMv8. */
20519 #undef ARM_VARIANT
20520 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
20521 #undef THUMB_VARIANT
20522 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
20523
20524 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
20525 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
20526 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
20527 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
20528 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
20529 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
20530 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
20531 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
20532 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
20533 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
20534 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
20535 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
20536 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
20537 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
20538 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
20539 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
20540 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
20541
20542 /* Crypto v1 extensions. */
20543 #undef ARM_VARIANT
20544 #define ARM_VARIANT & fpu_crypto_ext_armv8
20545 #undef THUMB_VARIANT
20546 #define THUMB_VARIANT & fpu_crypto_ext_armv8
20547
20548 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
20549 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
20550 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
20551 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
20552 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
20553 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
20554 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
20555 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
20556 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
20557 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
20558 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
20559 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
20560 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
20561 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
20562
20563 #undef ARM_VARIANT
20564 #define ARM_VARIANT & crc_ext_armv8
20565 #undef THUMB_VARIANT
20566 #define THUMB_VARIANT & crc_ext_armv8
20567 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
20568 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
20569 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
20570 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
20571 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
20572 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
20573
20574 /* ARMv8.2 RAS extension. */
20575 #undef ARM_VARIANT
20576 #define ARM_VARIANT & arm_ext_ras
20577 #undef THUMB_VARIANT
20578 #define THUMB_VARIANT & arm_ext_ras
20579 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
20580
20581 #undef ARM_VARIANT
20582 #define ARM_VARIANT & arm_ext_v8_3
20583 #undef THUMB_VARIANT
20584 #define THUMB_VARIANT & arm_ext_v8_3
20585 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
20586 NUF (vcmla, 0, 4, (RNDQ, RNDQ, RNDQ_RNSC, EXPi), vcmla),
20587 NUF (vcadd, 0, 4, (RNDQ, RNDQ, RNDQ, EXPi), vcadd),
20588
20589 #undef ARM_VARIANT
20590 #define ARM_VARIANT & fpu_neon_ext_dotprod
20591 #undef THUMB_VARIANT
20592 #define THUMB_VARIANT & fpu_neon_ext_dotprod
20593 NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
20594 NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
20595
20596 #undef ARM_VARIANT
20597 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
20598 #undef THUMB_VARIANT
20599 #define THUMB_VARIANT NULL
20600
20601 cCE("wfs", e200110, 1, (RR), rd),
20602 cCE("rfs", e300110, 1, (RR), rd),
20603 cCE("wfc", e400110, 1, (RR), rd),
20604 cCE("rfc", e500110, 1, (RR), rd),
20605
20606 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
20607 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
20608 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
20609 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
20610
20611 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
20612 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
20613 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
20614 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
20615
20616 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
20617 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
20618 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
20619 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
20620 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
20621 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
20622 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
20623 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
20624 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
20625 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
20626 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
20627 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
20628
20629 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
20630 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
20631 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
20632 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
20633 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
20634 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
20635 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
20636 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
20637 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
20638 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
20639 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
20640 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
20641
20642 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
20643 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
20644 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
20645 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
20646 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
20647 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
20648 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
20649 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
20650 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
20651 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
20652 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
20653 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
20654
20655 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
20656 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
20657 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
20658 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
20659 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
20660 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
20661 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
20662 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
20663 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
20664 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
20665 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
20666 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
20667
20668 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
20669 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
20670 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
20671 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
20672 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
20673 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
20674 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
20675 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
20676 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
20677 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
20678 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
20679 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
20680
20681 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
20682 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
20683 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
20684 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
20685 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
20686 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
20687 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
20688 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
20689 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
20690 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
20691 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
20692 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
20693
20694 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
20695 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
20696 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
20697 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
20698 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
20699 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
20700 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
20701 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
20702 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
20703 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
20704 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
20705 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
20706
20707 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
20708 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
20709 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
20710 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
20711 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
20712 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
20713 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
20714 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
20715 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
20716 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
20717 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
20718 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
20719
20720 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
20721 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
20722 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
20723 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
20724 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
20725 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
20726 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
20727 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
20728 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
20729 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
20730 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
20731 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
20732
20733 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
20734 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
20735 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
20736 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
20737 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
20738 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
20739 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
20740 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
20741 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
20742 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
20743 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
20744 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
20745
20746 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
20747 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
20748 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
20749 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
20750 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
20751 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
20752 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
20753 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
20754 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
20755 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
20756 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
20757 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
20758
20759 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
20760 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
20761 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
20762 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
20763 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
20764 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
20765 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
20766 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
20767 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
20768 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
20769 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
20770 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
20771
20772 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
20773 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
20774 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
20775 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
20776 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
20777 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
20778 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
20779 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
20780 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
20781 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
20782 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
20783 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
20784
20785 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
20786 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
20787 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
20788 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
20789 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
20790 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
20791 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
20792 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
20793 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
20794 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
20795 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
20796 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
20797
20798 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
20799 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
20800 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
20801 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
20802 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
20803 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
20804 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
20805 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
20806 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
20807 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
20808 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
20809 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
20810
20811 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
20812 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
20813 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
20814 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
20815 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
20816 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
20817 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
20818 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
20819 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
20820 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
20821 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
20822 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
20823
20824 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
20825 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
20826 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
20827 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
20828 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
20829 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20830 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20831 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20832 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
20833 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
20834 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
20835 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
20836
20837 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
20838 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
20839 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
20840 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
20841 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
20842 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20843 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20844 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20845 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
20846 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
20847 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
20848 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
20849
20850 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
20851 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
20852 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
20853 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
20854 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
20855 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20856 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20857 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20858 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
20859 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
20860 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
20861 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
20862
20863 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
20864 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
20865 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
20866 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
20867 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
20868 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20869 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20870 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20871 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
20872 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
20873 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
20874 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
20875
20876 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
20877 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
20878 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
20879 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
20880 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
20881 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20882 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20883 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20884 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
20885 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
20886 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
20887 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
20888
20889 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
20890 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
20891 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
20892 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
20893 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
20894 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20895 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20896 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20897 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
20898 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
20899 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
20900 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
20901
20902 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
20903 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
20904 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
20905 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
20906 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
20907 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20908 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20909 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20910 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
20911 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
20912 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
20913 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
20914
20915 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
20916 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
20917 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
20918 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
20919 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
20920 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20921 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20922 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20923 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
20924 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
20925 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
20926 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
20927
20928 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
20929 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
20930 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
20931 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
20932 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
20933 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20934 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20935 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20936 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
20937 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
20938 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
20939 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
20940
20941 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
20942 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
20943 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
20944 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
20945 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
20946 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20947 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20948 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20949 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
20950 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
20951 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
20952 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
20953
20954 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20955 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20956 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20957 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20958 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20959 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20960 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20961 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20962 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20963 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20964 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20965 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20966
20967 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20968 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20969 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20970 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20971 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20972 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20973 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20974 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20975 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20976 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20977 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20978 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20979
20980 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20981 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20982 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20983 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20984 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20985 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20986 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20987 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20988 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20989 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20990 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20991 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20992
20993 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
20994 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
20995 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
20996 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
20997
20998 cCL("flts", e000110, 2, (RF, RR), rn_rd),
20999 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
21000 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
21001 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
21002 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
21003 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
21004 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
21005 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
21006 cCL("flte", e080110, 2, (RF, RR), rn_rd),
21007 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
21008 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
21009 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
21010
21011 /* The implementation of the FIX instruction is broken on some
21012 assemblers, in that it accepts a precision specifier as well as a
21013 rounding specifier, despite the fact that this is meaningless.
21014 To be more compatible, we accept it as well, though of course it
21015 does not set any bits. */
21016 cCE("fix", e100110, 2, (RR, RF), rd_rm),
21017 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
21018 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
21019 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
21020 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
21021 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
21022 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
21023 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
21024 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
21025 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
21026 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
21027 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
21028 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
21029
21030 /* Instructions that were new with the real FPA, call them V2. */
21031 #undef ARM_VARIANT
21032 #define ARM_VARIANT & fpu_fpa_ext_v2
21033
21034 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21035 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21036 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21037 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21038 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21039 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21040
21041 #undef ARM_VARIANT
21042 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
21043
21044 /* Moves and type conversions. */
21045 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
21046 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
21047 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
21048 cCE("fmstat", ef1fa10, 0, (), noargs),
21049 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
21050 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
21051 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
21052 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
21053 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
21054 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
21055 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
21056 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
21057 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
21058 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
21059
21060 /* Memory operations. */
21061 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
21062 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
21063 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
21064 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
21065 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
21066 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
21067 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
21068 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
21069 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
21070 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
21071 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
21072 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
21073 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
21074 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
21075 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
21076 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
21077 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
21078 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
21079
21080 /* Monadic operations. */
21081 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
21082 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
21083 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
21084
21085 /* Dyadic operations. */
21086 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21087 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21088 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21089 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21090 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21091 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21092 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21093 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21094 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21095
21096 /* Comparisons. */
21097 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
21098 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
21099 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
21100 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
21101
21102 /* Double precision load/store are still present on single precision
21103 implementations. */
21104 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
21105 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
21106 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
21107 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
21108 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
21109 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
21110 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
21111 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
21112 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
21113 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
21114
21115 #undef ARM_VARIANT
21116 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
21117
21118 /* Moves and type conversions. */
21119 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
21120 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
21121 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
21122 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
21123 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
21124 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
21125 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
21126 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
21127 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
21128 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
21129 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
21130 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
21131 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
21132
21133 /* Monadic operations. */
21134 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
21135 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
21136 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
21137
21138 /* Dyadic operations. */
21139 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21140 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21141 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21142 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21143 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21144 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21145 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21146 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21147 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21148
21149 /* Comparisons. */
21150 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
21151 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
21152 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
21153 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
21154
21155 #undef ARM_VARIANT
21156 #define ARM_VARIANT & fpu_vfp_ext_v2
21157
21158 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
21159 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
21160 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
21161 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
21162
21163 /* Instructions which may belong to either the Neon or VFP instruction sets.
21164 Individual encoder functions perform additional architecture checks. */
21165 #undef ARM_VARIANT
21166 #define ARM_VARIANT & fpu_vfp_ext_v1xd
21167 #undef THUMB_VARIANT
21168 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
21169
21170 /* These mnemonics are unique to VFP. */
21171 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
21172 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
21173 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21174 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21175 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21176 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
21177 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
21178 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
21179 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
21180 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
21181
21182 /* Mnemonics shared by Neon and VFP. */
21183 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
21184 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
21185 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
21186
21187 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
21188 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
21189
21190 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
21191 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
21192
21193 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
21194 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
21195 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
21196 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
21197 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
21198 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
21199 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
21200 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
21201
21202 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
21203 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
21204 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
21205 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
21206
21207
21208 /* NOTE: All VMOV encoding is special-cased! */
21209 NCE(vmov, 0, 1, (VMOV), neon_mov),
21210 NCE(vmovq, 0, 1, (VMOV), neon_mov),
21211
21212 #undef ARM_VARIANT
21213 #define ARM_VARIANT & arm_ext_fp16
21214 #undef THUMB_VARIANT
21215 #define THUMB_VARIANT & arm_ext_fp16
21216 /* New instructions added from v8.2, allowing the extraction and insertion of
21217 the upper 16 bits of a 32-bit vector register. */
21218 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
21219 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
21220
21221 /* New backported fma/fms instructions optional in v8.2. */
21222 NCE (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
21223 NCE (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
21224
21225 #undef THUMB_VARIANT
21226 #define THUMB_VARIANT & fpu_neon_ext_v1
21227 #undef ARM_VARIANT
21228 #define ARM_VARIANT & fpu_neon_ext_v1
21229
21230 /* Data processing with three registers of the same length. */
21231 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
21232 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
21233 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
21234 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
21235 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
21236 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
21237 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
21238 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
21239 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
21240 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
21241 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
21242 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
21243 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
21244 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
21245 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
21246 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
21247 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
21248 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
21249 /* If not immediate, fall back to neon_dyadic_i64_su.
21250 shl_imm should accept I8 I16 I32 I64,
21251 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
21252 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
21253 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
21254 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
21255 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
21256 /* Logic ops, types optional & ignored. */
21257 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
21258 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
21259 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
21260 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
21261 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
21262 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
21263 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
21264 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
21265 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
21266 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
21267 /* Bitfield ops, untyped. */
21268 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
21269 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
21270 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
21271 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
21272 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
21273 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
21274 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
21275 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
21276 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
21277 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
21278 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
21279 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
21280 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
21281 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
21282 back to neon_dyadic_if_su. */
21283 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
21284 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
21285 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
21286 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
21287 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
21288 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
21289 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
21290 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
21291 /* Comparison. Type I8 I16 I32 F32. */
21292 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
21293 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
21294 /* As above, D registers only. */
21295 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
21296 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
21297 /* Int and float variants, signedness unimportant. */
21298 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
21299 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
21300 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
21301 /* Add/sub take types I8 I16 I32 I64 F32. */
21302 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
21303 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
21304 /* vtst takes sizes 8, 16, 32. */
21305 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
21306 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
21307 /* VMUL takes I8 I16 I32 F32 P8. */
21308 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
21309 /* VQD{R}MULH takes S16 S32. */
21310 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
21311 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
21312 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
21313 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
21314 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
21315 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
21316 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
21317 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
21318 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
21319 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
21320 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
21321 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
21322 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
21323 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
21324 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
21325 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
21326 /* ARM v8.1 extension. */
21327 nUF (vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
21328 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
21329 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
21330 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
21331
21332 /* Two address, int/float. Types S8 S16 S32 F32. */
21333 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
21334 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
21335
21336 /* Data processing with two registers and a shift amount. */
21337 /* Right shifts, and variants with rounding.
21338 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
21339 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
21340 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
21341 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
21342 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
21343 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
21344 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
21345 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
21346 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
21347 /* Shift and insert. Sizes accepted 8 16 32 64. */
21348 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
21349 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
21350 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
21351 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
21352 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
21353 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
21354 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
21355 /* Right shift immediate, saturating & narrowing, with rounding variants.
21356 Types accepted S16 S32 S64 U16 U32 U64. */
21357 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
21358 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
21359 /* As above, unsigned. Types accepted S16 S32 S64. */
21360 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
21361 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
21362 /* Right shift narrowing. Types accepted I16 I32 I64. */
21363 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
21364 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
21365 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
21366 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
21367 /* CVT with optional immediate for fixed-point variant. */
21368 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
21369
21370 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
21371 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
21372
21373 /* Data processing, three registers of different lengths. */
21374 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
21375 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
21376 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
21377 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
21378 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
21379 /* If not scalar, fall back to neon_dyadic_long.
21380 Vector types as above, scalar types S16 S32 U16 U32. */
21381 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
21382 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
21383 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
21384 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
21385 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
21386 /* Dyadic, narrowing insns. Types I16 I32 I64. */
21387 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21388 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21389 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21390 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21391 /* Saturating doubling multiplies. Types S16 S32. */
21392 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21393 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21394 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21395 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
21396 S16 S32 U16 U32. */
21397 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
21398
21399 /* Extract. Size 8. */
21400 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
21401 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
21402
21403 /* Two registers, miscellaneous. */
21404 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
21405 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
21406 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
21407 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
21408 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
21409 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
21410 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
21411 /* Vector replicate. Sizes 8 16 32. */
21412 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
21413 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
21414 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
21415 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
21416 /* VMOVN. Types I16 I32 I64. */
21417 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
21418 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
21419 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
21420 /* VQMOVUN. Types S16 S32 S64. */
21421 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
21422 /* VZIP / VUZP. Sizes 8 16 32. */
21423 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
21424 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
21425 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
21426 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
21427 /* VQABS / VQNEG. Types S8 S16 S32. */
21428 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
21429 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
21430 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
21431 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
21432 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
21433 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
21434 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
21435 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
21436 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
21437 /* Reciprocal estimates. Types U32 F16 F32. */
21438 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
21439 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
21440 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
21441 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
21442 /* VCLS. Types S8 S16 S32. */
21443 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
21444 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
21445 /* VCLZ. Types I8 I16 I32. */
21446 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
21447 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
21448 /* VCNT. Size 8. */
21449 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
21450 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
21451 /* Two address, untyped. */
21452 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
21453 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
21454 /* VTRN. Sizes 8 16 32. */
21455 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
21456 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
21457
21458 /* Table lookup. Size 8. */
21459 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
21460 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
21461
21462 #undef THUMB_VARIANT
21463 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
21464 #undef ARM_VARIANT
21465 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
21466
21467 /* Neon element/structure load/store. */
21468 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
21469 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
21470 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
21471 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
21472 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
21473 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
21474 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
21475 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
21476
21477 #undef THUMB_VARIANT
21478 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
21479 #undef ARM_VARIANT
21480 #define ARM_VARIANT & fpu_vfp_ext_v3xd
21481 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
21482 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21483 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21484 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21485 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21486 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21487 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21488 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21489 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21490
21491 #undef THUMB_VARIANT
21492 #define THUMB_VARIANT & fpu_vfp_ext_v3
21493 #undef ARM_VARIANT
21494 #define ARM_VARIANT & fpu_vfp_ext_v3
21495
21496 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
21497 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21498 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21499 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21500 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21501 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21502 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21503 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21504 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21505
21506 #undef ARM_VARIANT
21507 #define ARM_VARIANT & fpu_vfp_ext_fma
21508 #undef THUMB_VARIANT
21509 #define THUMB_VARIANT & fpu_vfp_ext_fma
21510 /* Mnemonics shared by Neon and VFP. These are included in the
21511 VFP FMA variant; NEON and VFP FMA always includes the NEON
21512 FMA instructions. */
21513 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
21514 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
21515 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
21516 the v form should always be used. */
21517 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21518 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21519 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21520 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21521 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21522 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21523
21524 #undef THUMB_VARIANT
21525 #undef ARM_VARIANT
21526 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
21527
21528 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21529 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21530 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21531 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21532 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21533 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21534 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
21535 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
21536
21537 #undef ARM_VARIANT
21538 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
21539
21540 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
21541 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
21542 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
21543 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
21544 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
21545 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
21546 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
21547 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
21548 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
21549 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21550 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21551 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21552 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21553 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21554 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21555 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21556 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21557 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21558 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
21559 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
21560 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21561 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21562 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21563 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21564 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21565 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21566 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
21567 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
21568 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
21569 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
21570 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
21571 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
21572 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
21573 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
21574 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
21575 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
21576 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
21577 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21578 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21579 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21580 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21581 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21582 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21583 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21584 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21585 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21586 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
21587 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21588 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21589 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21590 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21591 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21592 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21593 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21594 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21595 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21596 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21597 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21598 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21599 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21600 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21601 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21602 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21603 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21604 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21605 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21606 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21607 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21608 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
21609 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
21610 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21611 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21612 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21613 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21614 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21615 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21616 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21617 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21618 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21619 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21620 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21621 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21622 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21623 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21624 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21625 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21626 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21627 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21628 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
21629 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21630 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21631 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21632 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21633 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21634 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21635 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21636 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21637 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21638 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21639 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21640 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21641 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21642 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21643 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21644 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21645 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21646 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21647 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21648 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21649 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21650 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
21651 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21652 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21653 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21654 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21655 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21656 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21657 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21658 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21659 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21660 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21661 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21662 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21663 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21664 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21665 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21666 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21667 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21668 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21669 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21670 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21671 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
21672 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
21673 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21674 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21675 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21676 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21677 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21678 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21679 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21680 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21681 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21682 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
21683 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
21684 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
21685 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
21686 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
21687 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
21688 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21689 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21690 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21691 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
21692 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
21693 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
21694 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
21695 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
21696 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
21697 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21698 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21699 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21700 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21701 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
21702
21703 #undef ARM_VARIANT
21704 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
21705
21706 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
21707 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
21708 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
21709 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
21710 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
21711 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
21712 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21713 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21714 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21715 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21716 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21717 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21718 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21719 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21720 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21721 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21722 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21723 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21724 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21725 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21726 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
21727 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21728 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21729 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21730 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21731 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21732 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21733 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21734 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21735 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21736 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21737 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21738 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21739 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21740 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21741 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21742 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21743 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21744 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21745 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21746 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21747 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21748 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21749 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21750 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21751 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21752 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21753 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21754 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21755 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21756 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21757 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21758 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21759 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21760 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21761 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21762 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21763
21764 #undef ARM_VARIANT
21765 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
21766
21767 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21768 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21769 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21770 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21771 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21772 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21773 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21774 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21775 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
21776 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
21777 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
21778 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
21779 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
21780 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
21781 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
21782 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
21783 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
21784 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
21785 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
21786 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
21787 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
21788 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
21789 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
21790 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
21791 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
21792 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
21793 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
21794 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
21795 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
21796 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
21797 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
21798 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
21799 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
21800 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
21801 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
21802 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
21803 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
21804 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
21805 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
21806 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
21807 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
21808 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
21809 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
21810 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
21811 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
21812 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
21813 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
21814 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
21815 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
21816 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
21817 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
21818 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
21819 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
21820 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
21821 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
21822 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
21823 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
21824 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
21825 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
21826 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
21827 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
21828 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
21829 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
21830 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
21831 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21832 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21833 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21834 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21835 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21836 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21837 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21838 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21839 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21840 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21841 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21842 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21843
21844 /* ARMv8.5-A instructions. */
21845 #undef ARM_VARIANT
21846 #define ARM_VARIANT & arm_ext_sb
21847 #undef THUMB_VARIANT
21848 #define THUMB_VARIANT & arm_ext_sb
21849 TUF("sb", 57ff070, f3bf8f70, 0, (), noargs, noargs),
21850
21851 #undef ARM_VARIANT
21852 #define ARM_VARIANT & arm_ext_predres
21853 #undef THUMB_VARIANT
21854 #define THUMB_VARIANT & arm_ext_predres
21855 CE("cfprctx", e070f93, 1, (RRnpc), rd),
21856 CE("dvprctx", e070fb3, 1, (RRnpc), rd),
21857 CE("cpprctx", e070ff3, 1, (RRnpc), rd),
21858
21859 /* ARMv8-M instructions. */
21860 #undef ARM_VARIANT
21861 #define ARM_VARIANT NULL
21862 #undef THUMB_VARIANT
21863 #define THUMB_VARIANT & arm_ext_v8m
21864 ToU("sg", e97fe97f, 0, (), noargs),
21865 ToC("blxns", 4784, 1, (RRnpc), t_blx),
21866 ToC("bxns", 4704, 1, (RRnpc), t_bx),
21867 ToC("tt", e840f000, 2, (RRnpc, RRnpc), tt),
21868 ToC("ttt", e840f040, 2, (RRnpc, RRnpc), tt),
21869 ToC("tta", e840f080, 2, (RRnpc, RRnpc), tt),
21870 ToC("ttat", e840f0c0, 2, (RRnpc, RRnpc), tt),
21871
21872 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
21873 instructions behave as nop if no VFP is present. */
21874 #undef THUMB_VARIANT
21875 #define THUMB_VARIANT & arm_ext_v8m_main
21876 ToC("vlldm", ec300a00, 1, (RRnpc), rn),
21877 ToC("vlstm", ec200a00, 1, (RRnpc), rn),
21878
21879 /* Armv8.1-M Mainline instructions. */
21880 #undef THUMB_VARIANT
21881 #define THUMB_VARIANT & arm_ext_v8_1m_main
21882 toC("bf", _bf, 2, (EXPs, EXPs), t_branch_future),
21883 toU("bfcsel", _bfcsel, 4, (EXPs, EXPs, EXPs, COND), t_branch_future),
21884 toC("bfx", _bfx, 2, (EXPs, RRnpcsp), t_branch_future),
21885 toC("bfl", _bfl, 2, (EXPs, EXPs), t_branch_future),
21886 toC("bflx", _bflx, 2, (EXPs, RRnpcsp), t_branch_future),
21887
21888 toU("dls", _dls, 2, (LR, RRnpcsp), t_loloop),
21889 toU("wls", _wls, 3, (LR, RRnpcsp, EXP), t_loloop),
21890 toU("le", _le, 2, (oLR, EXP), t_loloop),
21891
21892 ToC("clrm", e89f0000, 1, (CLRMLST), t_clrm)
21893 };
21894 #undef ARM_VARIANT
21895 #undef THUMB_VARIANT
21896 #undef TCE
21897 #undef TUE
21898 #undef TUF
21899 #undef TCC
21900 #undef cCE
21901 #undef cCL
21902 #undef C3E
21903 #undef C3
21904 #undef CE
21905 #undef CM
21906 #undef CL
21907 #undef UE
21908 #undef UF
21909 #undef UT
21910 #undef NUF
21911 #undef nUF
21912 #undef NCE
21913 #undef nCE
21914 #undef OPS0
21915 #undef OPS1
21916 #undef OPS2
21917 #undef OPS3
21918 #undef OPS4
21919 #undef OPS5
21920 #undef OPS6
21921 #undef do_0
21922 #undef ToC
21923 #undef toC
21924 #undef ToU
21925 #undef toU
21926 \f
21927 /* MD interface: bits in the object file. */
21928
21929 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
21930 for use in the a.out file, and stores them in the array pointed to by buf.
21931 This knows about the endian-ness of the target machine and does
21932 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
21933 2 (short) and 4 (long) Floating numbers are put out as a series of
21934 LITTLENUMS (shorts, here at least). */
21935
21936 void
21937 md_number_to_chars (char * buf, valueT val, int n)
21938 {
21939 if (target_big_endian)
21940 number_to_chars_bigendian (buf, val, n);
21941 else
21942 number_to_chars_littleendian (buf, val, n);
21943 }
21944
21945 static valueT
21946 md_chars_to_number (char * buf, int n)
21947 {
21948 valueT result = 0;
21949 unsigned char * where = (unsigned char *) buf;
21950
21951 if (target_big_endian)
21952 {
21953 while (n--)
21954 {
21955 result <<= 8;
21956 result |= (*where++ & 255);
21957 }
21958 }
21959 else
21960 {
21961 while (n--)
21962 {
21963 result <<= 8;
21964 result |= (where[n] & 255);
21965 }
21966 }
21967
21968 return result;
21969 }
21970
21971 /* MD interface: Sections. */
21972
21973 /* Calculate the maximum variable size (i.e., excluding fr_fix)
21974 that an rs_machine_dependent frag may reach. */
21975
21976 unsigned int
21977 arm_frag_max_var (fragS *fragp)
21978 {
21979 /* We only use rs_machine_dependent for variable-size Thumb instructions,
21980 which are either THUMB_SIZE (2) or INSN_SIZE (4).
21981
21982 Note that we generate relaxable instructions even for cases that don't
21983 really need it, like an immediate that's a trivial constant. So we're
21984 overestimating the instruction size for some of those cases. Rather
21985 than putting more intelligence here, it would probably be better to
21986 avoid generating a relaxation frag in the first place when it can be
21987 determined up front that a short instruction will suffice. */
21988
21989 gas_assert (fragp->fr_type == rs_machine_dependent);
21990 return INSN_SIZE;
21991 }
21992
21993 /* Estimate the size of a frag before relaxing. Assume everything fits in
21994 2 bytes. */
21995
21996 int
21997 md_estimate_size_before_relax (fragS * fragp,
21998 segT segtype ATTRIBUTE_UNUSED)
21999 {
22000 fragp->fr_var = 2;
22001 return 2;
22002 }
22003
/* Convert a machine dependent frag.  Called once relaxation has
   settled on a final size for the instruction (fragp->fr_var: 2 for a
   narrow 16-bit Thumb encoding, 4 for a wide 32-bit Thumb-2 encoding).
   If the wide form was chosen, the narrow opcode already in the frag
   is rewritten in place as its 32-bit equivalent, transplanting the
   register fields; in all cases a fixup carrying the appropriate
   relocation is emitted so the immediate/offset/branch field is
   resolved later.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The relaxable instruction lives at the end of the fixed part of
     the frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* Fetch the narrow encoding previously emitted; its register fields
     are copied into the wide encoding below.  */
  old_op = bfd_get_16(abfd, buf);
  /* Build the expression for the fixup: symbolic if the frag refers to
     a symbol, otherwise a plain constant.  */
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  /* fr_subtype holds the T_MNEM_* code of the relaxable instruction.  */
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Narrow opcodes with top nibble 4 or 9 (the PC/SP-relative
	     forms) keep Rd in bits 8-10; the register-offset forms keep
	     Rd in bits 0-2 and Rn in bits 3-5.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  /* NOTE(review): presumably selects the immediate-offset
	     addressing form of the T32 encoding — confirm against the
	     Thumb-2 load/store encodings.  */
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Of this group only the PC-relative literal load is
	 PC-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Copy Rd (bits 4-7 of the narrow form) into bits 8-11.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* NOTE(review): the -4 looks like compensation for the
	     implicit PC bias of the narrow ADR form — confirm.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs carry the register in bits 8-10 already; cmp/cmn
	     need it shifted a further 8 bits up in the wide form.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      /* Unconditional branch: no register fields to copy, just pick
	 the matching branch relocation for the chosen width.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Transplant the condition field (bits 8-11) into the wide
	     encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  /* Copy Rd (bits 4-7) into bits 8-11 of the wide form.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Rd (bits 4-7) goes to bits 8-11, Rn (bits 0-3) to
	     bits 16-19.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 is the S (flag-setting) bit in the wide encoding;
	     the flag-setting forms take the ADD_IMM relocation.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      /* Any other subtype means the frag was built inconsistently.  */
      abort ();
    }
  /* Emit the fixup covering the (possibly rewritten) instruction and
     record where it came from for diagnostics.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  /* The variable part is now fixed.  */
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
22177
22178 /* Return the size of a relaxable immediate operand instruction.
22179 SHIFT and SIZE specify the form of the allowable immediate. */
22180 static int
22181 relax_immediate (fragS *fragp, int size, int shift)
22182 {
22183 offsetT offset;
22184 offsetT mask;
22185 offsetT low;
22186
22187 /* ??? Should be able to do better than this. */
22188 if (fragp->fr_symbol)
22189 return 4;
22190
22191 low = (1 << shift) - 1;
22192 mask = (1 << (shift + size)) - (1 << shift);
22193 offset = fragp->fr_offset;
22194 /* Force misaligned offsets to 32-bit variant. */
22195 if (offset & low)
22196 return 4;
22197 if (offset & ~mask)
22198 return 4;
22199 return 2;
22200 }
22201
/* Get the address of a symbol during relaxation.  FRAGP is the frag
   containing the reference and STRETCH is the amount that preceding
   frags have grown (or shrunk, if negative) so far on this pass.
   Returns the symbol's estimated address including fragp->fr_offset.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  /* relax_marker differing means SYM_FRAG has not yet been visited on
     the current relaxation pass, so its recorded address is stale.  */
  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the accumulated stretch towards zero to a multiple
		 of the alignment, since the align frag will absorb the
		 remainder.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      if (stretch == 0)
		break;
	    }
	}
      /* f == NULL means SYM_FRAG was not found ahead of us, i.e. the
	 symbol is behind this frag; its address is already final.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
22251
22252 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
22253 load. */
22254 static int
22255 relax_adr (fragS *fragp, asection *sec, long stretch)
22256 {
22257 addressT addr;
22258 offsetT val;
22259
22260 /* Assume worst case for symbols not known to be in the same section. */
22261 if (fragp->fr_symbol == NULL
22262 || !S_IS_DEFINED (fragp->fr_symbol)
22263 || sec != S_GET_SEGMENT (fragp->fr_symbol)
22264 || S_IS_WEAK (fragp->fr_symbol))
22265 return 4;
22266
22267 val = relaxed_symbol_addr (fragp, stretch);
22268 addr = fragp->fr_address + fragp->fr_fix;
22269 addr = (addr + 4) & ~3;
22270 /* Force misaligned targets to 32-bit variant. */
22271 if (val & 3)
22272 return 4;
22273 val -= addr;
22274 if (val < 0 || val > 1020)
22275 return 4;
22276 return 2;
22277 }
22278
22279 /* Return the size of a relaxable add/sub immediate instruction. */
22280 static int
22281 relax_addsub (fragS *fragp, asection *sec)
22282 {
22283 char *buf;
22284 int op;
22285
22286 buf = fragp->fr_literal + fragp->fr_fix;
22287 op = bfd_get_16(sec->owner, buf);
22288 if ((op & 0xf) == ((op >> 4) & 0xf))
22289 return relax_immediate (fragp, 8, 0);
22290 else
22291 return relax_immediate (fragp, 3, 0);
22292 }
22293
22294 /* Return TRUE iff the definition of symbol S could be pre-empted
22295 (overridden) at link or load time. */
22296 static bfd_boolean
22297 symbol_preemptible (symbolS *s)
22298 {
22299 /* Weak symbols can always be pre-empted. */
22300 if (S_IS_WEAK (s))
22301 return TRUE;
22302
22303 /* Non-global symbols cannot be pre-empted. */
22304 if (! S_IS_EXTERNAL (s))
22305 return FALSE;
22306
22307 #ifdef OBJ_ELF
22308 /* In ELF, a global symbol can be marked protected, or private. In that
22309 case it can't be pre-empted (other definitions in the same link unit
22310 would violate the ODR). */
22311 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
22312 return FALSE;
22313 #endif
22314
22315 /* Other global symbols might be pre-empted. */
22316 return TRUE;
22317 }
22318
/* Return the size of a relaxable branch instruction.  BITS is the
   size of the offset field in the narrow instruction.  Returns 2 when
   the narrow encoding reaches the target, 4 otherwise.  */

static int
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
{
  addressT addr;
  offsetT val;
  offsetT limit;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

#ifdef OBJ_ELF
  /* A branch to a function in ARM state will require interworking.
     The linker may turn it into a BLX sequence, so keep the wide form.  */
  if (S_IS_DEFINED (fragp->fr_symbol)
      && ARM_IS_FUNC (fragp->fr_symbol))
    return 4;
#endif

  /* A pre-emptible target may be redirected by the linker; the narrow
     reach cannot be relied on.  */
  if (symbol_preemptible (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  /* Thumb PC reads as the instruction address + 4.  */
  addr = fragp->fr_address + fragp->fr_fix + 4;
  val -= addr;

  /* Offset is a signed value *2 */
  /* The narrow form holds BITS signed halfword units, hence a byte
     range of [-2^BITS, 2^BITS - 2].  */
  limit = 1 << bits;
  if (val >= limit || val < -limit)
    return 4;
  return 2;
}
22355
22356
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.

   FRAGP->fr_subtype identifies which relaxable Thumb instruction the
   frag holds; the corresponding helper decides between the 2-byte and
   4-byte encoding.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      /* 8-bit offset scaled by 4 (word-aligned).  */
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      /* 5-bit offset scaled by 4.  */
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      /* 5-bit offset scaled by 2 (halfword-aligned).  */
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      /* 5-bit byte offset, unscaled.  */
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      /* Unconditional branch: 11-bit signed halfword offset.  */
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      /* Conditional branch: 8-bit signed halfword offset.  */
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      /* Emit the final 32-bit encoding now and convert the frag to a
	 plain fill so later passes leave it alone.  */
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
22435
/* Round up a section size to the appropriate boundary.  ARM imposes no
   extra padding, so the size is returned unchanged.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED,
		  valueT size)
{
  return size;
}
22444
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment with NOPs appropriate for the frag's
   recorded ARM/Thumb mode.  Bytes needed to reach instruction alignment
   are zero-filled (and, for ELF, marked with a data mapping symbol).  */

void
arm_handle_align (fragS * fragP)
{
  /* ARM NOP encodings, indexed by [have-v6k-nop][big-endian].  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1: MOV r0, r0.  */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k: architectural NOP.  */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  /* 16-bit Thumb NOP encodings, indexed by [thumb2][big-endian].  */
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1: MOV r8, r8.  */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2: NOP.  */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  /* 32-bit Thumb-2 NOP.W, indexed by [big-endian].  */
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  /* Thumb-2: prefer wide NOPs, with one narrow NOP if the byte
	     count is not a multiple of 4.  */
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Zero-fill up to the instruction-size boundary.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      /* Mark the zero bytes as data so disassemblers don't decode them.  */
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
22564
22565 /* Called from md_do_align. Used to create an alignment
22566 frag in a code section. */
22567
22568 void
22569 arm_frag_align_code (int n, int max)
22570 {
22571 char * p;
22572
22573 /* We assume that there will never be a requirement
22574 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
22575 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
22576 {
22577 char err_msg[128];
22578
22579 sprintf (err_msg,
22580 _("alignments greater than %d bytes not supported in .text sections."),
22581 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
22582 as_fatal ("%s", err_msg);
22583 }
22584
22585 p = frag_var (rs_align_code,
22586 MAX_MEM_FOR_RS_ALIGN_CODE,
22587 1,
22588 (relax_substateT) max,
22589 (symbolS *) NULL,
22590 (offsetT) n,
22591 (char *) NULL);
22592 *p = 0;
22593 }
22594
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  MODE_RECORDED
     doubles as the "already initialised" flag.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
22608
#else /* OBJ_ELF is defined.  */

/* ELF variant: besides recording the ARM/Thumb mode, emit the
   appropriate mapping symbol ($a/$t/$d) for the frag.  */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  bfd_boolean frag_thumb_mode;

  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
    return;

  /* Strip the MODE_RECORDED flag to recover the plain thumb/arm bit.  */
  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;

  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  switch (fragP->fr_type)
    {
    case rs_align:
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
      break;
    default:
      break;
    }
}
22643
22644 /* When we change sections we need to issue a new mapping symbol. */
22645
22646 void
22647 arm_elf_change_section (void)
22648 {
22649 /* Link an unlinked unwind index table section to the .text section. */
22650 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
22651 && elf_linked_to_section (now_seg) == NULL)
22652 elf_linked_to_section (now_seg) = text_section;
22653 }
22654
22655 int
22656 arm_elf_section_type (const char * str, size_t len)
22657 {
22658 if (len == 5 && strncmp (str, "exidx", 5) == 0)
22659 return SHT_ARM_EXIDX;
22660
22661 return -1;
22662 }
22663 \f
22664 /* Code to deal with unwinding tables. */
22665
22666 static void add_unwind_adjustsp (offsetT);
22667
22668 /* Generate any deferred unwind frame offset. */
22669
22670 static void
22671 flush_pending_unwind (void)
22672 {
22673 offsetT offset;
22674
22675 offset = unwind.pending_offset;
22676 unwind.pending_offset = 0;
22677 if (offset != 0)
22678 add_unwind_adjustsp (offset);
22679 }
22680
/* Add an opcode to this list for this function.  Two-byte opcodes should
   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
   order.  */

static void
add_unwind_opcode (valueT op, int length)
{
  /* Add any deferred stack adjustment.  */
  if (unwind.pending_offset)
    flush_pending_unwind ();

  unwind.sp_restored = 0;

  /* Grow the opcode buffer by fixed-size chunks as needed.  */
  if (unwind.opcode_count + length > unwind.opcode_alloc)
    {
      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
      if (unwind.opcodes)
	unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
				     unwind.opcode_alloc);
      else
	unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
    }
  /* Append the bytes least-significant first; combined with the
     reversed list this yields the final big-endian opcode order.  */
  while (length > 0)
    {
      length--;
      unwind.opcodes[unwind.opcode_count] = op & 0xff;
      op >>= 8;
      unwind.opcode_count++;
    }
}
22711
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes
   (positive = vsp increment, negative = decrement), choosing among the
   short (0x00-0x3f), two-opcode, and long uleb128 (0xb2) EHABI forms.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  */
      /* Emit uleb128 bytes last-first, then the 0xb2 opcode, so that
	 after the final list reversal they appear in the right order.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      /* 0x3f pops the maximum 0x100 bytes; the second opcode covers
	 the remainder.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit 0x7f (vsp -= 0x100) opcodes until the
	 remainder fits a single 0x40-0x7f opcode.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
22773
/* Finish the list of unwind opcodes for this function.  When a frame
   pointer is in use, emit the "set vsp = reg" opcode (0x90 | reg) after
   flushing the adjustment that brings vsp to the frame pointer.  */

static void
finish_unwind_opcodes (void)
{
  valueT op;

  if (unwind.fp_used)
    {
      /* Adjust sp as necessary.  */
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
      flush_pending_unwind ();

      /* After restoring sp from the frame pointer.  */
      op = 0x90 | unwind.fp_reg;
      add_unwind_opcode (op, 1);
    }
  else
    flush_pending_unwind ();
}
22794
22795
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry.  Switches to (creating if necessary) the unwind section that
   corresponds to TEXT_SEG, propagating any COMDAT group membership.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  if (idx)
    {
      /* Index table: .ARM.exidx*.  */
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      /* Unwind data: .ARM.extab*.  */
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      /* Old-style linkonce code section: use the matching linkonce
	 unwind prefix and keep only the trailing unique name.  */
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, 0, flags, 0, group_name,
			  linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
22863
22864
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry (1 == EXIDX_CANTUNWIND, or a word with bit 31 set when
   everything fits inline in the index table).  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      /* Reverse the (reverse-order) opcode list into DATA,
		 most significant byte first.  */
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Convert SIZE from bytes to words, rounding up; it must fit the
     8-bit "additional words" field.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
23033
23034
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_arm_frame_initial_instructions (void)
{
  /* On entry the CFA is the value of SP, with no offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
23042 #endif /* OBJ_ELF */
23043
23044 /* Convert REGNAME to a DWARF-2 register number. */
23045
23046 int
23047 tc_arm_regname_to_dw2regnum (char *regname)
23048 {
23049 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
23050 if (reg != FAIL)
23051 return reg;
23052
23053 /* PR 16694: Allow VFP registers as well. */
23054 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
23055 if (reg != FAIL)
23056 return 64 + reg;
23057
23058 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
23059 if (reg != FAIL)
23060 return reg + 256;
23061
23062 return FAIL;
23063 }
23064
#ifdef TE_PE
/* Emit a SIZE-byte section-relative (SECREL) offset to SYMBOL, as used
   by PE/COFF debug info.  */
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
#endif
23077
/* MD interface: Symbol and relocation handling.  */

/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* A same-section BL to an ARM function on v5t+ will be turned
	 into a BLX by the linker; resolve it here against the real
	 address rather than deferring with a zero base.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
23213
23214 static bfd_boolean flag_warn_syms = TRUE;
23215
23216 bfd_boolean
23217 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
23218 {
23219 /* PR 18347 - Warn if the user attempts to create a symbol with the same
23220 name as an ARM instruction. Whilst strictly speaking it is allowed, it
23221 does mean that the resulting code might be very confusing to the reader.
23222 Also this warning can be triggered if the user omits an operand before
23223 an immediate address, eg:
23224
23225 LDR =foo
23226
23227 GAS treats this as an assignment of the value of the symbol foo to a
23228 symbol LDR, and so (without this code) it will not issue any kind of
23229 warning or error message.
23230
23231 Note - ARM instructions are case-insensitive but the strings in the hash
23232 table are all stored in lower case, so we must first ensure that name is
23233 lower case too. */
23234 if (flag_warn_syms && arm_ops_hsh)
23235 {
23236 char * nbuf = strdup (name);
23237 char * p;
23238
23239 for (p = nbuf; *p; p++)
23240 *p = TOLOWER (*p);
23241 if (hash_find (arm_ops_hsh, nbuf) != NULL)
23242 {
23243 static struct hash_control * already_warned = NULL;
23244
23245 if (already_warned == NULL)
23246 already_warned = hash_new ();
23247 /* Only warn about the symbol once. To keep the code
23248 simple we let hash_insert do the lookup for us. */
23249 if (hash_insert (already_warned, name, NULL) == NULL)
23250 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
23251 }
23252 else
23253 free (nbuf);
23254 }
23255
23256 return FALSE;
23257 }
23258
23259 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
23260 Otherwise we have no need to default values of symbols. */
23261
23262 symbolS *
23263 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
23264 {
23265 #ifdef OBJ_ELF
23266 if (name[0] == '_' && name[1] == 'G'
23267 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
23268 {
23269 if (!GOT_symbol)
23270 {
23271 if (symbol_find (name))
23272 as_bad (_("GOT already in the symbol table"));
23273
23274 GOT_symbol = symbol_new (name, undefined_section,
23275 (valueT) 0, & zero_address_frag);
23276 }
23277
23278 return GOT_symbol;
23279 }
23280 #endif
23281
23282 return NULL;
23283 }
23284
23285 /* Subroutine of md_apply_fix. Check to see if an immediate can be
23286 computed as two separate immediate values, added together. We
23287 already know that this value cannot be computed by just one ARM
23288 instruction. */
23289
23290 static unsigned int
23291 validate_immediate_twopart (unsigned int val,
23292 unsigned int * highpart)
23293 {
23294 unsigned int a;
23295 unsigned int i;
23296
23297 for (i = 0; i < 32; i += 2)
23298 if (((a = rotate_left (val, i)) & 0xff) != 0)
23299 {
23300 if (a & 0xff00)
23301 {
23302 if (a & ~ 0xffff)
23303 continue;
23304 * highpart = (a >> 8) | ((i + 24) << 7);
23305 }
23306 else if (a & 0xff0000)
23307 {
23308 if (a & 0xff000000)
23309 continue;
23310 * highpart = (a >> 16) | ((i + 16) << 7);
23311 }
23312 else
23313 {
23314 gas_assert (a & 0xff000000);
23315 * highpart = (a >> 24) | ((i + 8) << 7);
23316 }
23317
23318 return (a & 0xff) | (i << 7);
23319 }
23320
23321 return FAIL;
23322 }
23323
23324 static int
23325 validate_offset_imm (unsigned int val, int hwse)
23326 {
23327 if ((hwse && val > 255) || val > 4095)
23328 return FAIL;
23329 return val;
23330 }
23331
23332 /* Subroutine of md_apply_fix. Do those data_ops which can take a
23333 negative immediate constant by altering the instruction. A bit of
23334 a hack really.
23335 MOV <-> MVN
23336 AND <-> BIC
23337 ADC <-> SBC
23338 by inverting the second operand, and
23339 ADD <-> SUB
23340 CMP <-> CMN
23341 by negating the second operand. */
23342
23343 static int
23344 negate_data_op (unsigned long * instruction,
23345 unsigned long value)
23346 {
23347 int op, new_inst;
23348 unsigned long negated, inverted;
23349
23350 negated = encode_arm_immediate (-value);
23351 inverted = encode_arm_immediate (~value);
23352
23353 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
23354 switch (op)
23355 {
23356 /* First negates. */
23357 case OPCODE_SUB: /* ADD <-> SUB */
23358 new_inst = OPCODE_ADD;
23359 value = negated;
23360 break;
23361
23362 case OPCODE_ADD:
23363 new_inst = OPCODE_SUB;
23364 value = negated;
23365 break;
23366
23367 case OPCODE_CMP: /* CMP <-> CMN */
23368 new_inst = OPCODE_CMN;
23369 value = negated;
23370 break;
23371
23372 case OPCODE_CMN:
23373 new_inst = OPCODE_CMP;
23374 value = negated;
23375 break;
23376
23377 /* Now Inverted ops. */
23378 case OPCODE_MOV: /* MOV <-> MVN */
23379 new_inst = OPCODE_MVN;
23380 value = inverted;
23381 break;
23382
23383 case OPCODE_MVN:
23384 new_inst = OPCODE_MOV;
23385 value = inverted;
23386 break;
23387
23388 case OPCODE_AND: /* AND <-> BIC */
23389 new_inst = OPCODE_BIC;
23390 value = inverted;
23391 break;
23392
23393 case OPCODE_BIC:
23394 new_inst = OPCODE_AND;
23395 value = inverted;
23396 break;
23397
23398 case OPCODE_ADC: /* ADC <-> SBC */
23399 new_inst = OPCODE_SBC;
23400 value = inverted;
23401 break;
23402
23403 case OPCODE_SBC:
23404 new_inst = OPCODE_ADC;
23405 value = inverted;
23406 break;
23407
23408 /* We cannot do anything. */
23409 default:
23410 return FAIL;
23411 }
23412
23413 if (value == (unsigned) FAIL)
23414 return FAIL;
23415
23416 *instruction &= OPCODE_MASK;
23417 *instruction |= new_inst << DATA_OP_SHIFT;
23418 return value;
23419 }
23420
23421 /* Like negate_data_op, but for Thumb-2. */
23422
23423 static unsigned int
23424 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
23425 {
23426 int op, new_inst;
23427 int rd;
23428 unsigned int negated, inverted;
23429
23430 negated = encode_thumb32_immediate (-value);
23431 inverted = encode_thumb32_immediate (~value);
23432
23433 rd = (*instruction >> 8) & 0xf;
23434 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
23435 switch (op)
23436 {
23437 /* ADD <-> SUB. Includes CMP <-> CMN. */
23438 case T2_OPCODE_SUB:
23439 new_inst = T2_OPCODE_ADD;
23440 value = negated;
23441 break;
23442
23443 case T2_OPCODE_ADD:
23444 new_inst = T2_OPCODE_SUB;
23445 value = negated;
23446 break;
23447
23448 /* ORR <-> ORN. Includes MOV <-> MVN. */
23449 case T2_OPCODE_ORR:
23450 new_inst = T2_OPCODE_ORN;
23451 value = inverted;
23452 break;
23453
23454 case T2_OPCODE_ORN:
23455 new_inst = T2_OPCODE_ORR;
23456 value = inverted;
23457 break;
23458
23459 /* AND <-> BIC. TST has no inverted equivalent. */
23460 case T2_OPCODE_AND:
23461 new_inst = T2_OPCODE_BIC;
23462 if (rd == 15)
23463 value = FAIL;
23464 else
23465 value = inverted;
23466 break;
23467
23468 case T2_OPCODE_BIC:
23469 new_inst = T2_OPCODE_AND;
23470 value = inverted;
23471 break;
23472
23473 /* ADC <-> SBC */
23474 case T2_OPCODE_ADC:
23475 new_inst = T2_OPCODE_SBC;
23476 value = inverted;
23477 break;
23478
23479 case T2_OPCODE_SBC:
23480 new_inst = T2_OPCODE_ADC;
23481 value = inverted;
23482 break;
23483
23484 /* We cannot do anything. */
23485 default:
23486 return FAIL;
23487 }
23488
23489 if (value == (unsigned int)FAIL)
23490 return FAIL;
23491
23492 *instruction &= T2_OPCODE_MASK;
23493 *instruction |= new_inst << T2_DATA_OP_SHIFT;
23494 return value;
23495 }
23496
23497 /* Read a 32-bit thumb instruction from buf. */
23498
23499 static unsigned long
23500 get_thumb32_insn (char * buf)
23501 {
23502 unsigned long insn;
23503 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
23504 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23505
23506 return insn;
23507 }
23508
23509 /* We usually want to set the low bit on the address of thumb function
23510 symbols. In particular .word foo - . should have the low bit set.
23511 Generic code tries to fold the difference of two symbols to
23512 a constant. Prevent this and force a relocation when the first symbols
23513 is a thumb function. */
23514
23515 bfd_boolean
23516 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
23517 {
23518 if (op == O_subtract
23519 && l->X_op == O_symbol
23520 && r->X_op == O_symbol
23521 && THUMB_IS_FUNC (l->X_add_symbol))
23522 {
23523 l->X_op = O_subtract;
23524 l->X_op_symbol = r->X_add_symbol;
23525 l->X_add_number -= r->X_add_number;
23526 return TRUE;
23527 }
23528
23529 /* Process as normal. */
23530 return FALSE;
23531 }
23532
23533 /* Encode Thumb2 unconditional branches and calls. The encoding
23534 for the 2 are identical for the immediate values. */
23535
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
  /* Mask of the I1 (bit 13) and I2 (bit 11) fields in the second
     halfword of the instruction.  */
#define T2I1I2MASK ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Split the offset into the fields of the 32-bit B/BL encoding:
     sign bit S, the I1/I2 bits and the high and low immediate halves.
     Bit 0 of VALUE is discarded.  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  /* Clear any stale J1/J2 bits before inserting the new ones.  */
  newval2 &= ~T2I1I2MASK;
  /* The instruction stores J1 = I1 EOR (NOT S) and J2 = I2 EOR (NOT S);
     the final XOR with T2I1I2MASK supplies the NOT.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
23557
23558 void
23559 md_apply_fix (fixS * fixP,
23560 valueT * valP,
23561 segT seg)
23562 {
23563 offsetT value = * valP;
23564 offsetT newval;
23565 unsigned int newimm;
23566 unsigned long temp;
23567 int sign;
23568 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
23569
23570 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
23571
23572 /* Note whether this will delete the relocation. */
23573
23574 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
23575 fixP->fx_done = 1;
23576
23577 /* On a 64-bit host, silently truncate 'value' to 32 bits for
23578 consistency with the behaviour on 32-bit hosts. Remember value
23579 for emit_reloc. */
23580 value &= 0xffffffff;
23581 value ^= 0x80000000;
23582 value -= 0x80000000;
23583
23584 *valP = value;
23585 fixP->fx_addnumber = value;
23586
23587 /* Same treatment for fixP->fx_offset. */
23588 fixP->fx_offset &= 0xffffffff;
23589 fixP->fx_offset ^= 0x80000000;
23590 fixP->fx_offset -= 0x80000000;
23591
23592 switch (fixP->fx_r_type)
23593 {
23594 case BFD_RELOC_NONE:
23595 /* This will need to go in the object file. */
23596 fixP->fx_done = 0;
23597 break;
23598
23599 case BFD_RELOC_ARM_IMMEDIATE:
23600 /* We claim that this fixup has been processed here,
23601 even if in fact we generate an error because we do
23602 not have a reloc for it, so tc_gen_reloc will reject it. */
23603 fixP->fx_done = 1;
23604
23605 if (fixP->fx_addsy)
23606 {
23607 const char *msg = 0;
23608
23609 if (! S_IS_DEFINED (fixP->fx_addsy))
23610 msg = _("undefined symbol %s used as an immediate value");
23611 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
23612 msg = _("symbol %s is in a different section");
23613 else if (S_IS_WEAK (fixP->fx_addsy))
23614 msg = _("symbol %s is weak and may be overridden later");
23615
23616 if (msg)
23617 {
23618 as_bad_where (fixP->fx_file, fixP->fx_line,
23619 msg, S_GET_NAME (fixP->fx_addsy));
23620 break;
23621 }
23622 }
23623
23624 temp = md_chars_to_number (buf, INSN_SIZE);
23625
23626 /* If the offset is negative, we should use encoding A2 for ADR. */
23627 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
23628 newimm = negate_data_op (&temp, value);
23629 else
23630 {
23631 newimm = encode_arm_immediate (value);
23632
23633 /* If the instruction will fail, see if we can fix things up by
23634 changing the opcode. */
23635 if (newimm == (unsigned int) FAIL)
23636 newimm = negate_data_op (&temp, value);
23637 /* MOV accepts both ARM modified immediate (A1 encoding) and
23638 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
23639 When disassembling, MOV is preferred when there is no encoding
23640 overlap. */
23641 if (newimm == (unsigned int) FAIL
23642 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
23643 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
23644 && !((temp >> SBIT_SHIFT) & 0x1)
23645 && value >= 0 && value <= 0xffff)
23646 {
23647 /* Clear bits[23:20] to change encoding from A1 to A2. */
23648 temp &= 0xff0fffff;
23649 /* Encoding high 4bits imm. Code below will encode the remaining
23650 low 12bits. */
23651 temp |= (value & 0x0000f000) << 4;
23652 newimm = value & 0x00000fff;
23653 }
23654 }
23655
23656 if (newimm == (unsigned int) FAIL)
23657 {
23658 as_bad_where (fixP->fx_file, fixP->fx_line,
23659 _("invalid constant (%lx) after fixup"),
23660 (unsigned long) value);
23661 break;
23662 }
23663
23664 newimm |= (temp & 0xfffff000);
23665 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23666 break;
23667
23668 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
23669 {
23670 unsigned int highpart = 0;
23671 unsigned int newinsn = 0xe1a00000; /* nop. */
23672
23673 if (fixP->fx_addsy)
23674 {
23675 const char *msg = 0;
23676
23677 if (! S_IS_DEFINED (fixP->fx_addsy))
23678 msg = _("undefined symbol %s used as an immediate value");
23679 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
23680 msg = _("symbol %s is in a different section");
23681 else if (S_IS_WEAK (fixP->fx_addsy))
23682 msg = _("symbol %s is weak and may be overridden later");
23683
23684 if (msg)
23685 {
23686 as_bad_where (fixP->fx_file, fixP->fx_line,
23687 msg, S_GET_NAME (fixP->fx_addsy));
23688 break;
23689 }
23690 }
23691
23692 newimm = encode_arm_immediate (value);
23693 temp = md_chars_to_number (buf, INSN_SIZE);
23694
23695 /* If the instruction will fail, see if we can fix things up by
23696 changing the opcode. */
23697 if (newimm == (unsigned int) FAIL
23698 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
23699 {
23700 /* No ? OK - try using two ADD instructions to generate
23701 the value. */
23702 newimm = validate_immediate_twopart (value, & highpart);
23703
23704 /* Yes - then make sure that the second instruction is
23705 also an add. */
23706 if (newimm != (unsigned int) FAIL)
23707 newinsn = temp;
23708 /* Still No ? Try using a negated value. */
23709 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
23710 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
23711 /* Otherwise - give up. */
23712 else
23713 {
23714 as_bad_where (fixP->fx_file, fixP->fx_line,
23715 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
23716 (long) value);
23717 break;
23718 }
23719
23720 /* Replace the first operand in the 2nd instruction (which
23721 is the PC) with the destination register. We have
23722 already added in the PC in the first instruction and we
23723 do not want to do it again. */
23724 newinsn &= ~ 0xf0000;
23725 newinsn |= ((newinsn & 0x0f000) << 4);
23726 }
23727
23728 newimm |= (temp & 0xfffff000);
23729 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23730
23731 highpart |= (newinsn & 0xfffff000);
23732 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
23733 }
23734 break;
23735
23736 case BFD_RELOC_ARM_OFFSET_IMM:
23737 if (!fixP->fx_done && seg->use_rela_p)
23738 value = 0;
23739 /* Fall through. */
23740
23741 case BFD_RELOC_ARM_LITERAL:
23742 sign = value > 0;
23743
23744 if (value < 0)
23745 value = - value;
23746
23747 if (validate_offset_imm (value, 0) == FAIL)
23748 {
23749 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
23750 as_bad_where (fixP->fx_file, fixP->fx_line,
23751 _("invalid literal constant: pool needs to be closer"));
23752 else
23753 as_bad_where (fixP->fx_file, fixP->fx_line,
23754 _("bad immediate value for offset (%ld)"),
23755 (long) value);
23756 break;
23757 }
23758
23759 newval = md_chars_to_number (buf, INSN_SIZE);
23760 if (value == 0)
23761 newval &= 0xfffff000;
23762 else
23763 {
23764 newval &= 0xff7ff000;
23765 newval |= value | (sign ? INDEX_UP : 0);
23766 }
23767 md_number_to_chars (buf, newval, INSN_SIZE);
23768 break;
23769
23770 case BFD_RELOC_ARM_OFFSET_IMM8:
23771 case BFD_RELOC_ARM_HWLITERAL:
23772 sign = value > 0;
23773
23774 if (value < 0)
23775 value = - value;
23776
23777 if (validate_offset_imm (value, 1) == FAIL)
23778 {
23779 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
23780 as_bad_where (fixP->fx_file, fixP->fx_line,
23781 _("invalid literal constant: pool needs to be closer"));
23782 else
23783 as_bad_where (fixP->fx_file, fixP->fx_line,
23784 _("bad immediate value for 8-bit offset (%ld)"),
23785 (long) value);
23786 break;
23787 }
23788
23789 newval = md_chars_to_number (buf, INSN_SIZE);
23790 if (value == 0)
23791 newval &= 0xfffff0f0;
23792 else
23793 {
23794 newval &= 0xff7ff0f0;
23795 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
23796 }
23797 md_number_to_chars (buf, newval, INSN_SIZE);
23798 break;
23799
23800 case BFD_RELOC_ARM_T32_OFFSET_U8:
23801 if (value < 0 || value > 1020 || value % 4 != 0)
23802 as_bad_where (fixP->fx_file, fixP->fx_line,
23803 _("bad immediate value for offset (%ld)"), (long) value);
23804 value /= 4;
23805
23806 newval = md_chars_to_number (buf+2, THUMB_SIZE);
23807 newval |= value;
23808 md_number_to_chars (buf+2, newval, THUMB_SIZE);
23809 break;
23810
23811 case BFD_RELOC_ARM_T32_OFFSET_IMM:
23812 /* This is a complicated relocation used for all varieties of Thumb32
23813 load/store instruction with immediate offset:
23814
23815 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
23816 *4, optional writeback(W)
23817 (doubleword load/store)
23818
23819 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
23820 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
23821 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
23822 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
23823 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
23824
23825 Uppercase letters indicate bits that are already encoded at
23826 this point. Lowercase letters are our problem. For the
23827 second block of instructions, the secondary opcode nybble
23828 (bits 8..11) is present, and bit 23 is zero, even if this is
23829 a PC-relative operation. */
23830 newval = md_chars_to_number (buf, THUMB_SIZE);
23831 newval <<= 16;
23832 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
23833
23834 if ((newval & 0xf0000000) == 0xe0000000)
23835 {
23836 /* Doubleword load/store: 8-bit offset, scaled by 4. */
23837 if (value >= 0)
23838 newval |= (1 << 23);
23839 else
23840 value = -value;
23841 if (value % 4 != 0)
23842 {
23843 as_bad_where (fixP->fx_file, fixP->fx_line,
23844 _("offset not a multiple of 4"));
23845 break;
23846 }
23847 value /= 4;
23848 if (value > 0xff)
23849 {
23850 as_bad_where (fixP->fx_file, fixP->fx_line,
23851 _("offset out of range"));
23852 break;
23853 }
23854 newval &= ~0xff;
23855 }
23856 else if ((newval & 0x000f0000) == 0x000f0000)
23857 {
23858 /* PC-relative, 12-bit offset. */
23859 if (value >= 0)
23860 newval |= (1 << 23);
23861 else
23862 value = -value;
23863 if (value > 0xfff)
23864 {
23865 as_bad_where (fixP->fx_file, fixP->fx_line,
23866 _("offset out of range"));
23867 break;
23868 }
23869 newval &= ~0xfff;
23870 }
23871 else if ((newval & 0x00000100) == 0x00000100)
23872 {
23873 /* Writeback: 8-bit, +/- offset. */
23874 if (value >= 0)
23875 newval |= (1 << 9);
23876 else
23877 value = -value;
23878 if (value > 0xff)
23879 {
23880 as_bad_where (fixP->fx_file, fixP->fx_line,
23881 _("offset out of range"));
23882 break;
23883 }
23884 newval &= ~0xff;
23885 }
23886 else if ((newval & 0x00000f00) == 0x00000e00)
23887 {
23888 /* T-instruction: positive 8-bit offset. */
23889 if (value < 0 || value > 0xff)
23890 {
23891 as_bad_where (fixP->fx_file, fixP->fx_line,
23892 _("offset out of range"));
23893 break;
23894 }
23895 newval &= ~0xff;
23896 newval |= value;
23897 }
23898 else
23899 {
23900 /* Positive 12-bit or negative 8-bit offset. */
23901 int limit;
23902 if (value >= 0)
23903 {
23904 newval |= (1 << 23);
23905 limit = 0xfff;
23906 }
23907 else
23908 {
23909 value = -value;
23910 limit = 0xff;
23911 }
23912 if (value > limit)
23913 {
23914 as_bad_where (fixP->fx_file, fixP->fx_line,
23915 _("offset out of range"));
23916 break;
23917 }
23918 newval &= ~limit;
23919 }
23920
23921 newval |= value;
23922 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
23923 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
23924 break;
23925
23926 case BFD_RELOC_ARM_SHIFT_IMM:
23927 newval = md_chars_to_number (buf, INSN_SIZE);
23928 if (((unsigned long) value) > 32
23929 || (value == 32
23930 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
23931 {
23932 as_bad_where (fixP->fx_file, fixP->fx_line,
23933 _("shift expression is too large"));
23934 break;
23935 }
23936
23937 if (value == 0)
23938 /* Shifts of zero must be done as lsl. */
23939 newval &= ~0x60;
23940 else if (value == 32)
23941 value = 0;
23942 newval &= 0xfffff07f;
23943 newval |= (value & 0x1f) << 7;
23944 md_number_to_chars (buf, newval, INSN_SIZE);
23945 break;
23946
23947 case BFD_RELOC_ARM_T32_IMMEDIATE:
23948 case BFD_RELOC_ARM_T32_ADD_IMM:
23949 case BFD_RELOC_ARM_T32_IMM12:
23950 case BFD_RELOC_ARM_T32_ADD_PC12:
23951 /* We claim that this fixup has been processed here,
23952 even if in fact we generate an error because we do
23953 not have a reloc for it, so tc_gen_reloc will reject it. */
23954 fixP->fx_done = 1;
23955
23956 if (fixP->fx_addsy
23957 && ! S_IS_DEFINED (fixP->fx_addsy))
23958 {
23959 as_bad_where (fixP->fx_file, fixP->fx_line,
23960 _("undefined symbol %s used as an immediate value"),
23961 S_GET_NAME (fixP->fx_addsy));
23962 break;
23963 }
23964
23965 newval = md_chars_to_number (buf, THUMB_SIZE);
23966 newval <<= 16;
23967 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
23968
23969 newimm = FAIL;
23970 if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23971 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
23972 Thumb2 modified immediate encoding (T2). */
23973 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
23974 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23975 {
23976 newimm = encode_thumb32_immediate (value);
23977 if (newimm == (unsigned int) FAIL)
23978 newimm = thumb32_negate_data_op (&newval, value);
23979 }
23980 if (newimm == (unsigned int) FAIL)
23981 {
23982 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
23983 {
23984 /* Turn add/sum into addw/subw. */
23985 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23986 newval = (newval & 0xfeffffff) | 0x02000000;
23987 /* No flat 12-bit imm encoding for addsw/subsw. */
23988 if ((newval & 0x00100000) == 0)
23989 {
23990 /* 12 bit immediate for addw/subw. */
23991 if (value < 0)
23992 {
23993 value = -value;
23994 newval ^= 0x00a00000;
23995 }
23996 if (value > 0xfff)
23997 newimm = (unsigned int) FAIL;
23998 else
23999 newimm = value;
24000 }
24001 }
24002 else
24003 {
24004 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
24005 UINT16 (T3 encoding), MOVW only accepts UINT16. When
24006 disassembling, MOV is preferred when there is no encoding
24007 overlap. */
24008 if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
24009 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
24010 but with the Rn field [19:16] set to 1111. */
24011 && (((newval >> 16) & 0xf) == 0xf)
24012 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
24013 && !((newval >> T2_SBIT_SHIFT) & 0x1)
24014 && value >= 0 && value <= 0xffff)
24015 {
24016 /* Toggle bit[25] to change encoding from T2 to T3. */
24017 newval ^= 1 << 25;
24018 /* Clear bits[19:16]. */
24019 newval &= 0xfff0ffff;
24020 /* Encoding high 4bits imm. Code below will encode the
24021 remaining low 12bits. */
24022 newval |= (value & 0x0000f000) << 4;
24023 newimm = value & 0x00000fff;
24024 }
24025 }
24026 }
24027
24028 if (newimm == (unsigned int)FAIL)
24029 {
24030 as_bad_where (fixP->fx_file, fixP->fx_line,
24031 _("invalid constant (%lx) after fixup"),
24032 (unsigned long) value);
24033 break;
24034 }
24035
24036 newval |= (newimm & 0x800) << 15;
24037 newval |= (newimm & 0x700) << 4;
24038 newval |= (newimm & 0x0ff);
24039
24040 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
24041 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
24042 break;
24043
24044 case BFD_RELOC_ARM_SMC:
24045 if (((unsigned long) value) > 0xffff)
24046 as_bad_where (fixP->fx_file, fixP->fx_line,
24047 _("invalid smc expression"));
24048 newval = md_chars_to_number (buf, INSN_SIZE);
24049 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
24050 md_number_to_chars (buf, newval, INSN_SIZE);
24051 break;
24052
24053 case BFD_RELOC_ARM_HVC:
24054 if (((unsigned long) value) > 0xffff)
24055 as_bad_where (fixP->fx_file, fixP->fx_line,
24056 _("invalid hvc expression"));
24057 newval = md_chars_to_number (buf, INSN_SIZE);
24058 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
24059 md_number_to_chars (buf, newval, INSN_SIZE);
24060 break;
24061
24062 case BFD_RELOC_ARM_SWI:
24063 if (fixP->tc_fix_data != 0)
24064 {
24065 if (((unsigned long) value) > 0xff)
24066 as_bad_where (fixP->fx_file, fixP->fx_line,
24067 _("invalid swi expression"));
24068 newval = md_chars_to_number (buf, THUMB_SIZE);
24069 newval |= value;
24070 md_number_to_chars (buf, newval, THUMB_SIZE);
24071 }
24072 else
24073 {
24074 if (((unsigned long) value) > 0x00ffffff)
24075 as_bad_where (fixP->fx_file, fixP->fx_line,
24076 _("invalid swi expression"));
24077 newval = md_chars_to_number (buf, INSN_SIZE);
24078 newval |= value;
24079 md_number_to_chars (buf, newval, INSN_SIZE);
24080 }
24081 break;
24082
24083 case BFD_RELOC_ARM_MULTI:
24084 if (((unsigned long) value) > 0xffff)
24085 as_bad_where (fixP->fx_file, fixP->fx_line,
24086 _("invalid expression in load/store multiple"));
24087 newval = value | md_chars_to_number (buf, INSN_SIZE);
24088 md_number_to_chars (buf, newval, INSN_SIZE);
24089 break;
24090
24091 #ifdef OBJ_ELF
24092 case BFD_RELOC_ARM_PCREL_CALL:
24093
24094 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
24095 && fixP->fx_addsy
24096 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24097 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24098 && THUMB_IS_FUNC (fixP->fx_addsy))
24099 /* Flip the bl to blx. This is a simple flip
24100 bit here because we generate PCREL_CALL for
24101 unconditional bls. */
24102 {
24103 newval = md_chars_to_number (buf, INSN_SIZE);
24104 newval = newval | 0x10000000;
24105 md_number_to_chars (buf, newval, INSN_SIZE);
24106 temp = 1;
24107 fixP->fx_done = 1;
24108 }
24109 else
24110 temp = 3;
24111 goto arm_branch_common;
24112
24113 case BFD_RELOC_ARM_PCREL_JUMP:
24114 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
24115 && fixP->fx_addsy
24116 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24117 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24118 && THUMB_IS_FUNC (fixP->fx_addsy))
24119 {
24120 /* This would map to a bl<cond>, b<cond>,
24121 b<always> to a Thumb function. We
24122 need to force a relocation for this particular
24123 case. */
24124 newval = md_chars_to_number (buf, INSN_SIZE);
24125 fixP->fx_done = 0;
24126 }
24127 /* Fall through. */
24128
24129 case BFD_RELOC_ARM_PLT32:
24130 #endif
24131 case BFD_RELOC_ARM_PCREL_BRANCH:
24132 temp = 3;
24133 goto arm_branch_common;
24134
24135 case BFD_RELOC_ARM_PCREL_BLX:
24136
24137 temp = 1;
24138 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
24139 && fixP->fx_addsy
24140 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24141 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24142 && ARM_IS_FUNC (fixP->fx_addsy))
24143 {
24144 /* Flip the blx to a bl and warn. */
24145 const char *name = S_GET_NAME (fixP->fx_addsy);
24146 newval = 0xeb000000;
24147 as_warn_where (fixP->fx_file, fixP->fx_line,
24148 _("blx to '%s' an ARM ISA state function changed to bl"),
24149 name);
24150 md_number_to_chars (buf, newval, INSN_SIZE);
24151 temp = 3;
24152 fixP->fx_done = 1;
24153 }
24154
24155 #ifdef OBJ_ELF
24156 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
24157 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
24158 #endif
24159
24160 arm_branch_common:
24161 /* We are going to store value (shifted right by two) in the
24162 instruction, in a 24 bit, signed field. Bits 26 through 32 either
24163 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
24164 also be clear. */
24165 if (value & temp)
24166 as_bad_where (fixP->fx_file, fixP->fx_line,
24167 _("misaligned branch destination"));
24168 if ((value & (offsetT)0xfe000000) != (offsetT)0
24169 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
24170 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
24171
24172 if (fixP->fx_done || !seg->use_rela_p)
24173 {
24174 newval = md_chars_to_number (buf, INSN_SIZE);
24175 newval |= (value >> 2) & 0x00ffffff;
24176 /* Set the H bit on BLX instructions. */
24177 if (temp == 1)
24178 {
24179 if (value & 2)
24180 newval |= 0x01000000;
24181 else
24182 newval &= ~0x01000000;
24183 }
24184 md_number_to_chars (buf, newval, INSN_SIZE);
24185 }
24186 break;
24187
24188 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
24189 /* CBZ can only branch forward. */
24190
24191 /* Attempts to use CBZ to branch to the next instruction
24192 (which, strictly speaking, are prohibited) will be turned into
24193 no-ops.
24194
24195 FIXME: It may be better to remove the instruction completely and
24196 perform relaxation. */
24197 if (value == -2)
24198 {
24199 newval = md_chars_to_number (buf, THUMB_SIZE);
24200 newval = 0xbf00; /* NOP encoding T1 */
24201 md_number_to_chars (buf, newval, THUMB_SIZE);
24202 }
24203 else
24204 {
24205 if (value & ~0x7e)
24206 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
24207
24208 if (fixP->fx_done || !seg->use_rela_p)
24209 {
24210 newval = md_chars_to_number (buf, THUMB_SIZE);
24211 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
24212 md_number_to_chars (buf, newval, THUMB_SIZE);
24213 }
24214 }
24215 break;
24216
24217 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
24218 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
24219 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
24220
24221 if (fixP->fx_done || !seg->use_rela_p)
24222 {
24223 newval = md_chars_to_number (buf, THUMB_SIZE);
24224 newval |= (value & 0x1ff) >> 1;
24225 md_number_to_chars (buf, newval, THUMB_SIZE);
24226 }
24227 break;
24228
24229 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
24230 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
24231 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
24232
24233 if (fixP->fx_done || !seg->use_rela_p)
24234 {
24235 newval = md_chars_to_number (buf, THUMB_SIZE);
24236 newval |= (value & 0xfff) >> 1;
24237 md_number_to_chars (buf, newval, THUMB_SIZE);
24238 }
24239 break;
24240
24241 case BFD_RELOC_THUMB_PCREL_BRANCH20:
24242 if (fixP->fx_addsy
24243 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24244 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24245 && ARM_IS_FUNC (fixP->fx_addsy)
24246 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
24247 {
24248 /* Force a relocation for a branch 20 bits wide. */
24249 fixP->fx_done = 0;
24250 }
24251 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
24252 as_bad_where (fixP->fx_file, fixP->fx_line,
24253 _("conditional branch out of range"));
24254
24255 if (fixP->fx_done || !seg->use_rela_p)
24256 {
24257 offsetT newval2;
24258 addressT S, J1, J2, lo, hi;
24259
24260 S = (value & 0x00100000) >> 20;
24261 J2 = (value & 0x00080000) >> 19;
24262 J1 = (value & 0x00040000) >> 18;
24263 hi = (value & 0x0003f000) >> 12;
24264 lo = (value & 0x00000ffe) >> 1;
24265
24266 newval = md_chars_to_number (buf, THUMB_SIZE);
24267 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
24268 newval |= (S << 10) | hi;
24269 newval2 |= (J1 << 13) | (J2 << 11) | lo;
24270 md_number_to_chars (buf, newval, THUMB_SIZE);
24271 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
24272 }
24273 break;
24274
24275 case BFD_RELOC_THUMB_PCREL_BLX:
24276 /* If there is a blx from a thumb state function to
24277 another thumb function flip this to a bl and warn
24278 about it. */
24279
24280 if (fixP->fx_addsy
24281 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24282 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24283 && THUMB_IS_FUNC (fixP->fx_addsy))
24284 {
24285 const char *name = S_GET_NAME (fixP->fx_addsy);
24286 as_warn_where (fixP->fx_file, fixP->fx_line,
24287 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
24288 name);
24289 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
24290 newval = newval | 0x1000;
24291 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
24292 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
24293 fixP->fx_done = 1;
24294 }
24295
24296
24297 goto thumb_bl_common;
24298
24299 case BFD_RELOC_THUMB_PCREL_BRANCH23:
24300 /* A bl from Thumb state ISA to an internal ARM state function
24301 is converted to a blx. */
24302 if (fixP->fx_addsy
24303 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24304 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24305 && ARM_IS_FUNC (fixP->fx_addsy)
24306 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
24307 {
24308 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
24309 newval = newval & ~0x1000;
24310 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
24311 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
24312 fixP->fx_done = 1;
24313 }
24314
24315 thumb_bl_common:
24316
24317 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
24318 /* For a BLX instruction, make sure that the relocation is rounded up
24319 to a word boundary. This follows the semantics of the instruction
24320 which specifies that bit 1 of the target address will come from bit
24321 1 of the base address. */
24322 value = (value + 3) & ~ 3;
24323
24324 #ifdef OBJ_ELF
24325 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
24326 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
24327 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
24328 #endif
24329
24330 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
24331 {
24332 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
24333 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
24334 else if ((value & ~0x1ffffff)
24335 && ((value & ~0x1ffffff) != ~0x1ffffff))
24336 as_bad_where (fixP->fx_file, fixP->fx_line,
24337 _("Thumb2 branch out of range"));
24338 }
24339
24340 if (fixP->fx_done || !seg->use_rela_p)
24341 encode_thumb2_b_bl_offset (buf, value);
24342
24343 break;
24344
24345 case BFD_RELOC_THUMB_PCREL_BRANCH25:
24346 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
24347 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
24348
24349 if (fixP->fx_done || !seg->use_rela_p)
24350 encode_thumb2_b_bl_offset (buf, value);
24351
24352 break;
24353
24354 case BFD_RELOC_8:
24355 if (fixP->fx_done || !seg->use_rela_p)
24356 *buf = value;
24357 break;
24358
24359 case BFD_RELOC_16:
24360 if (fixP->fx_done || !seg->use_rela_p)
24361 md_number_to_chars (buf, value, 2);
24362 break;
24363
24364 #ifdef OBJ_ELF
24365 case BFD_RELOC_ARM_TLS_CALL:
24366 case BFD_RELOC_ARM_THM_TLS_CALL:
24367 case BFD_RELOC_ARM_TLS_DESCSEQ:
24368 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
24369 case BFD_RELOC_ARM_TLS_GOTDESC:
24370 case BFD_RELOC_ARM_TLS_GD32:
24371 case BFD_RELOC_ARM_TLS_LE32:
24372 case BFD_RELOC_ARM_TLS_IE32:
24373 case BFD_RELOC_ARM_TLS_LDM32:
24374 case BFD_RELOC_ARM_TLS_LDO32:
24375 S_SET_THREAD_LOCAL (fixP->fx_addsy);
24376 break;
24377
24378 /* Same handling as above, but with the arm_fdpic guard. */
24379 case BFD_RELOC_ARM_TLS_GD32_FDPIC:
24380 case BFD_RELOC_ARM_TLS_IE32_FDPIC:
24381 case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
24382 if (arm_fdpic)
24383 {
24384 S_SET_THREAD_LOCAL (fixP->fx_addsy);
24385 }
24386 else
24387 {
24388 as_bad_where (fixP->fx_file, fixP->fx_line,
24389 _("Relocation supported only in FDPIC mode"));
24390 }
24391 break;
24392
24393 case BFD_RELOC_ARM_GOT32:
24394 case BFD_RELOC_ARM_GOTOFF:
24395 break;
24396
24397 case BFD_RELOC_ARM_GOT_PREL:
24398 if (fixP->fx_done || !seg->use_rela_p)
24399 md_number_to_chars (buf, value, 4);
24400 break;
24401
24402 case BFD_RELOC_ARM_TARGET2:
24403 /* TARGET2 is not partial-inplace, so we need to write the
24404 addend here for REL targets, because it won't be written out
24405 during reloc processing later. */
24406 if (fixP->fx_done || !seg->use_rela_p)
24407 md_number_to_chars (buf, fixP->fx_offset, 4);
24408 break;
24409
24410 /* Relocations for FDPIC. */
24411 case BFD_RELOC_ARM_GOTFUNCDESC:
24412 case BFD_RELOC_ARM_GOTOFFFUNCDESC:
24413 case BFD_RELOC_ARM_FUNCDESC:
24414 if (arm_fdpic)
24415 {
24416 if (fixP->fx_done || !seg->use_rela_p)
24417 md_number_to_chars (buf, 0, 4);
24418 }
24419 else
24420 {
24421 as_bad_where (fixP->fx_file, fixP->fx_line,
24422 _("Relocation supported only in FDPIC mode"));
24423 }
24424 break;
24425 #endif
24426
24427 case BFD_RELOC_RVA:
24428 case BFD_RELOC_32:
24429 case BFD_RELOC_ARM_TARGET1:
24430 case BFD_RELOC_ARM_ROSEGREL32:
24431 case BFD_RELOC_ARM_SBREL32:
24432 case BFD_RELOC_32_PCREL:
24433 #ifdef TE_PE
24434 case BFD_RELOC_32_SECREL:
24435 #endif
24436 if (fixP->fx_done || !seg->use_rela_p)
24437 #ifdef TE_WINCE
24438 /* For WinCE we only do this for pcrel fixups. */
24439 if (fixP->fx_done || fixP->fx_pcrel)
24440 #endif
24441 md_number_to_chars (buf, value, 4);
24442 break;
24443
24444 #ifdef OBJ_ELF
24445 case BFD_RELOC_ARM_PREL31:
24446 if (fixP->fx_done || !seg->use_rela_p)
24447 {
24448 newval = md_chars_to_number (buf, 4) & 0x80000000;
24449 if ((value ^ (value >> 1)) & 0x40000000)
24450 {
24451 as_bad_where (fixP->fx_file, fixP->fx_line,
24452 _("rel31 relocation overflow"));
24453 }
24454 newval |= value & 0x7fffffff;
24455 md_number_to_chars (buf, newval, 4);
24456 }
24457 break;
24458 #endif
24459
24460 case BFD_RELOC_ARM_CP_OFF_IMM:
24461 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
24462 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
24463 newval = md_chars_to_number (buf, INSN_SIZE);
24464 else
24465 newval = get_thumb32_insn (buf);
24466 if ((newval & 0x0f200f00) == 0x0d000900)
24467 {
24468 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
24469 has permitted values that are multiples of 2, in the range 0
24470 to 510. */
24471 if (value < -510 || value > 510 || (value & 1))
24472 as_bad_where (fixP->fx_file, fixP->fx_line,
24473 _("co-processor offset out of range"));
24474 }
24475 else if (value < -1023 || value > 1023 || (value & 3))
24476 as_bad_where (fixP->fx_file, fixP->fx_line,
24477 _("co-processor offset out of range"));
24478 cp_off_common:
24479 sign = value > 0;
24480 if (value < 0)
24481 value = -value;
24482 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24483 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
24484 newval = md_chars_to_number (buf, INSN_SIZE);
24485 else
24486 newval = get_thumb32_insn (buf);
24487 if (value == 0)
24488 newval &= 0xffffff00;
24489 else
24490 {
24491 newval &= 0xff7fff00;
24492 if ((newval & 0x0f200f00) == 0x0d000900)
24493 {
24494 /* This is a fp16 vstr/vldr.
24495
24496 It requires the immediate offset in the instruction is shifted
24497 left by 1 to be a half-word offset.
24498
24499 Here, left shift by 1 first, and later right shift by 2
24500 should get the right offset. */
24501 value <<= 1;
24502 }
24503 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
24504 }
24505 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24506 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
24507 md_number_to_chars (buf, newval, INSN_SIZE);
24508 else
24509 put_thumb32_insn (buf, newval);
24510 break;
24511
24512 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
24513 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
24514 if (value < -255 || value > 255)
24515 as_bad_where (fixP->fx_file, fixP->fx_line,
24516 _("co-processor offset out of range"));
24517 value *= 4;
24518 goto cp_off_common;
24519
24520 case BFD_RELOC_ARM_THUMB_OFFSET:
24521 newval = md_chars_to_number (buf, THUMB_SIZE);
24522 /* Exactly what ranges, and where the offset is inserted depends
24523 on the type of instruction, we can establish this from the
24524 top 4 bits. */
24525 switch (newval >> 12)
24526 {
24527 case 4: /* PC load. */
24528 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
24529 forced to zero for these loads; md_pcrel_from has already
24530 compensated for this. */
24531 if (value & 3)
24532 as_bad_where (fixP->fx_file, fixP->fx_line,
24533 _("invalid offset, target not word aligned (0x%08lX)"),
24534 (((unsigned long) fixP->fx_frag->fr_address
24535 + (unsigned long) fixP->fx_where) & ~3)
24536 + (unsigned long) value);
24537
24538 if (value & ~0x3fc)
24539 as_bad_where (fixP->fx_file, fixP->fx_line,
24540 _("invalid offset, value too big (0x%08lX)"),
24541 (long) value);
24542
24543 newval |= value >> 2;
24544 break;
24545
24546 case 9: /* SP load/store. */
24547 if (value & ~0x3fc)
24548 as_bad_where (fixP->fx_file, fixP->fx_line,
24549 _("invalid offset, value too big (0x%08lX)"),
24550 (long) value);
24551 newval |= value >> 2;
24552 break;
24553
24554 case 6: /* Word load/store. */
24555 if (value & ~0x7c)
24556 as_bad_where (fixP->fx_file, fixP->fx_line,
24557 _("invalid offset, value too big (0x%08lX)"),
24558 (long) value);
24559 newval |= value << 4; /* 6 - 2. */
24560 break;
24561
24562 case 7: /* Byte load/store. */
24563 if (value & ~0x1f)
24564 as_bad_where (fixP->fx_file, fixP->fx_line,
24565 _("invalid offset, value too big (0x%08lX)"),
24566 (long) value);
24567 newval |= value << 6;
24568 break;
24569
24570 case 8: /* Halfword load/store. */
24571 if (value & ~0x3e)
24572 as_bad_where (fixP->fx_file, fixP->fx_line,
24573 _("invalid offset, value too big (0x%08lX)"),
24574 (long) value);
24575 newval |= value << 5; /* 6 - 1. */
24576 break;
24577
24578 default:
24579 as_bad_where (fixP->fx_file, fixP->fx_line,
24580 "Unable to process relocation for thumb opcode: %lx",
24581 (unsigned long) newval);
24582 break;
24583 }
24584 md_number_to_chars (buf, newval, THUMB_SIZE);
24585 break;
24586
24587 case BFD_RELOC_ARM_THUMB_ADD:
24588 /* This is a complicated relocation, since we use it for all of
24589 the following immediate relocations:
24590
24591 3bit ADD/SUB
24592 8bit ADD/SUB
24593 9bit ADD/SUB SP word-aligned
24594 10bit ADD PC/SP word-aligned
24595
24596 The type of instruction being processed is encoded in the
24597 instruction field:
24598
24599 0x8000 SUB
24600 0x00F0 Rd
24601 0x000F Rs
24602 */
24603 newval = md_chars_to_number (buf, THUMB_SIZE);
24604 {
24605 int rd = (newval >> 4) & 0xf;
24606 int rs = newval & 0xf;
24607 int subtract = !!(newval & 0x8000);
24608
24609 /* Check for HI regs, only very restricted cases allowed:
24610 Adjusting SP, and using PC or SP to get an address. */
24611 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
24612 || (rs > 7 && rs != REG_SP && rs != REG_PC))
24613 as_bad_where (fixP->fx_file, fixP->fx_line,
24614 _("invalid Hi register with immediate"));
24615
24616 /* If value is negative, choose the opposite instruction. */
24617 if (value < 0)
24618 {
24619 value = -value;
24620 subtract = !subtract;
24621 if (value < 0)
24622 as_bad_where (fixP->fx_file, fixP->fx_line,
24623 _("immediate value out of range"));
24624 }
24625
24626 if (rd == REG_SP)
24627 {
24628 if (value & ~0x1fc)
24629 as_bad_where (fixP->fx_file, fixP->fx_line,
24630 _("invalid immediate for stack address calculation"));
24631 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
24632 newval |= value >> 2;
24633 }
24634 else if (rs == REG_PC || rs == REG_SP)
24635 {
24636 /* PR gas/18541. If the addition is for a defined symbol
24637 within range of an ADR instruction then accept it. */
24638 if (subtract
24639 && value == 4
24640 && fixP->fx_addsy != NULL)
24641 {
24642 subtract = 0;
24643
24644 if (! S_IS_DEFINED (fixP->fx_addsy)
24645 || S_GET_SEGMENT (fixP->fx_addsy) != seg
24646 || S_IS_WEAK (fixP->fx_addsy))
24647 {
24648 as_bad_where (fixP->fx_file, fixP->fx_line,
24649 _("address calculation needs a strongly defined nearby symbol"));
24650 }
24651 else
24652 {
24653 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
24654
24655 /* Round up to the next 4-byte boundary. */
24656 if (v & 3)
24657 v = (v + 3) & ~ 3;
24658 else
24659 v += 4;
24660 v = S_GET_VALUE (fixP->fx_addsy) - v;
24661
24662 if (v & ~0x3fc)
24663 {
24664 as_bad_where (fixP->fx_file, fixP->fx_line,
24665 _("symbol too far away"));
24666 }
24667 else
24668 {
24669 fixP->fx_done = 1;
24670 value = v;
24671 }
24672 }
24673 }
24674
24675 if (subtract || value & ~0x3fc)
24676 as_bad_where (fixP->fx_file, fixP->fx_line,
24677 _("invalid immediate for address calculation (value = 0x%08lX)"),
24678 (unsigned long) (subtract ? - value : value));
24679 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
24680 newval |= rd << 8;
24681 newval |= value >> 2;
24682 }
24683 else if (rs == rd)
24684 {
24685 if (value & ~0xff)
24686 as_bad_where (fixP->fx_file, fixP->fx_line,
24687 _("immediate value out of range"));
24688 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
24689 newval |= (rd << 8) | value;
24690 }
24691 else
24692 {
24693 if (value & ~0x7)
24694 as_bad_where (fixP->fx_file, fixP->fx_line,
24695 _("immediate value out of range"));
24696 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
24697 newval |= rd | (rs << 3) | (value << 6);
24698 }
24699 }
24700 md_number_to_chars (buf, newval, THUMB_SIZE);
24701 break;
24702
24703 case BFD_RELOC_ARM_THUMB_IMM:
24704 newval = md_chars_to_number (buf, THUMB_SIZE);
24705 if (value < 0 || value > 255)
24706 as_bad_where (fixP->fx_file, fixP->fx_line,
24707 _("invalid immediate: %ld is out of range"),
24708 (long) value);
24709 newval |= value;
24710 md_number_to_chars (buf, newval, THUMB_SIZE);
24711 break;
24712
24713 case BFD_RELOC_ARM_THUMB_SHIFT:
24714 /* 5bit shift value (0..32). LSL cannot take 32. */
24715 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
24716 temp = newval & 0xf800;
24717 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
24718 as_bad_where (fixP->fx_file, fixP->fx_line,
24719 _("invalid shift value: %ld"), (long) value);
24720 /* Shifts of zero must be encoded as LSL. */
24721 if (value == 0)
24722 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
24723 /* Shifts of 32 are encoded as zero. */
24724 else if (value == 32)
24725 value = 0;
24726 newval |= value << 6;
24727 md_number_to_chars (buf, newval, THUMB_SIZE);
24728 break;
24729
24730 case BFD_RELOC_VTABLE_INHERIT:
24731 case BFD_RELOC_VTABLE_ENTRY:
24732 fixP->fx_done = 0;
24733 return;
24734
24735 case BFD_RELOC_ARM_MOVW:
24736 case BFD_RELOC_ARM_MOVT:
24737 case BFD_RELOC_ARM_THUMB_MOVW:
24738 case BFD_RELOC_ARM_THUMB_MOVT:
24739 if (fixP->fx_done || !seg->use_rela_p)
24740 {
24741 /* REL format relocations are limited to a 16-bit addend. */
24742 if (!fixP->fx_done)
24743 {
24744 if (value < -0x8000 || value > 0x7fff)
24745 as_bad_where (fixP->fx_file, fixP->fx_line,
24746 _("offset out of range"));
24747 }
24748 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
24749 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
24750 {
24751 value >>= 16;
24752 }
24753
24754 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
24755 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
24756 {
24757 newval = get_thumb32_insn (buf);
24758 newval &= 0xfbf08f00;
24759 newval |= (value & 0xf000) << 4;
24760 newval |= (value & 0x0800) << 15;
24761 newval |= (value & 0x0700) << 4;
24762 newval |= (value & 0x00ff);
24763 put_thumb32_insn (buf, newval);
24764 }
24765 else
24766 {
24767 newval = md_chars_to_number (buf, 4);
24768 newval &= 0xfff0f000;
24769 newval |= value & 0x0fff;
24770 newval |= (value & 0xf000) << 4;
24771 md_number_to_chars (buf, newval, 4);
24772 }
24773 }
24774 return;
24775
24776 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
24777 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
24778 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
24779 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
24780 gas_assert (!fixP->fx_done);
24781 {
24782 bfd_vma insn;
24783 bfd_boolean is_mov;
24784 bfd_vma encoded_addend = value;
24785
24786 /* Check that addend can be encoded in instruction. */
24787 if (!seg->use_rela_p && (value < 0 || value > 255))
24788 as_bad_where (fixP->fx_file, fixP->fx_line,
24789 _("the offset 0x%08lX is not representable"),
24790 (unsigned long) encoded_addend);
24791
24792 /* Extract the instruction. */
24793 insn = md_chars_to_number (buf, THUMB_SIZE);
24794 is_mov = (insn & 0xf800) == 0x2000;
24795
24796 /* Encode insn. */
24797 if (is_mov)
24798 {
24799 if (!seg->use_rela_p)
24800 insn |= encoded_addend;
24801 }
24802 else
24803 {
24804 int rd, rs;
24805
24806 /* Extract the instruction. */
24807 /* Encoding is the following
24808 0x8000 SUB
24809 0x00F0 Rd
24810 0x000F Rs
24811 */
24812 /* The following conditions must be true :
24813 - ADD
24814 - Rd == Rs
24815 - Rd <= 7
24816 */
24817 rd = (insn >> 4) & 0xf;
24818 rs = insn & 0xf;
24819 if ((insn & 0x8000) || (rd != rs) || rd > 7)
24820 as_bad_where (fixP->fx_file, fixP->fx_line,
24821 _("Unable to process relocation for thumb opcode: %lx"),
24822 (unsigned long) insn);
24823
24824 /* Encode as ADD immediate8 thumb 1 code. */
24825 insn = 0x3000 | (rd << 8);
24826
24827 /* Place the encoded addend into the first 8 bits of the
24828 instruction. */
24829 if (!seg->use_rela_p)
24830 insn |= encoded_addend;
24831 }
24832
24833 /* Update the instruction. */
24834 md_number_to_chars (buf, insn, THUMB_SIZE);
24835 }
24836 break;
24837
24838 case BFD_RELOC_ARM_ALU_PC_G0_NC:
24839 case BFD_RELOC_ARM_ALU_PC_G0:
24840 case BFD_RELOC_ARM_ALU_PC_G1_NC:
24841 case BFD_RELOC_ARM_ALU_PC_G1:
24842 case BFD_RELOC_ARM_ALU_PC_G2:
24843 case BFD_RELOC_ARM_ALU_SB_G0_NC:
24844 case BFD_RELOC_ARM_ALU_SB_G0:
24845 case BFD_RELOC_ARM_ALU_SB_G1_NC:
24846 case BFD_RELOC_ARM_ALU_SB_G1:
24847 case BFD_RELOC_ARM_ALU_SB_G2:
24848 gas_assert (!fixP->fx_done);
24849 if (!seg->use_rela_p)
24850 {
24851 bfd_vma insn;
24852 bfd_vma encoded_addend;
24853 bfd_vma addend_abs = llabs (value);
24854
24855 /* Check that the absolute value of the addend can be
24856 expressed as an 8-bit constant plus a rotation. */
24857 encoded_addend = encode_arm_immediate (addend_abs);
24858 if (encoded_addend == (unsigned int) FAIL)
24859 as_bad_where (fixP->fx_file, fixP->fx_line,
24860 _("the offset 0x%08lX is not representable"),
24861 (unsigned long) addend_abs);
24862
24863 /* Extract the instruction. */
24864 insn = md_chars_to_number (buf, INSN_SIZE);
24865
24866 /* If the addend is positive, use an ADD instruction.
24867 Otherwise use a SUB. Take care not to destroy the S bit. */
24868 insn &= 0xff1fffff;
24869 if (value < 0)
24870 insn |= 1 << 22;
24871 else
24872 insn |= 1 << 23;
24873
24874 /* Place the encoded addend into the first 12 bits of the
24875 instruction. */
24876 insn &= 0xfffff000;
24877 insn |= encoded_addend;
24878
24879 /* Update the instruction. */
24880 md_number_to_chars (buf, insn, INSN_SIZE);
24881 }
24882 break;
24883
24884 case BFD_RELOC_ARM_LDR_PC_G0:
24885 case BFD_RELOC_ARM_LDR_PC_G1:
24886 case BFD_RELOC_ARM_LDR_PC_G2:
24887 case BFD_RELOC_ARM_LDR_SB_G0:
24888 case BFD_RELOC_ARM_LDR_SB_G1:
24889 case BFD_RELOC_ARM_LDR_SB_G2:
24890 gas_assert (!fixP->fx_done);
24891 if (!seg->use_rela_p)
24892 {
24893 bfd_vma insn;
24894 bfd_vma addend_abs = llabs (value);
24895
24896 /* Check that the absolute value of the addend can be
24897 encoded in 12 bits. */
24898 if (addend_abs >= 0x1000)
24899 as_bad_where (fixP->fx_file, fixP->fx_line,
24900 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
24901 (unsigned long) addend_abs);
24902
24903 /* Extract the instruction. */
24904 insn = md_chars_to_number (buf, INSN_SIZE);
24905
24906 /* If the addend is negative, clear bit 23 of the instruction.
24907 Otherwise set it. */
24908 if (value < 0)
24909 insn &= ~(1 << 23);
24910 else
24911 insn |= 1 << 23;
24912
24913 /* Place the absolute value of the addend into the first 12 bits
24914 of the instruction. */
24915 insn &= 0xfffff000;
24916 insn |= addend_abs;
24917
24918 /* Update the instruction. */
24919 md_number_to_chars (buf, insn, INSN_SIZE);
24920 }
24921 break;
24922
24923 case BFD_RELOC_ARM_LDRS_PC_G0:
24924 case BFD_RELOC_ARM_LDRS_PC_G1:
24925 case BFD_RELOC_ARM_LDRS_PC_G2:
24926 case BFD_RELOC_ARM_LDRS_SB_G0:
24927 case BFD_RELOC_ARM_LDRS_SB_G1:
24928 case BFD_RELOC_ARM_LDRS_SB_G2:
24929 gas_assert (!fixP->fx_done);
24930 if (!seg->use_rela_p)
24931 {
24932 bfd_vma insn;
24933 bfd_vma addend_abs = llabs (value);
24934
24935 /* Check that the absolute value of the addend can be
24936 encoded in 8 bits. */
24937 if (addend_abs >= 0x100)
24938 as_bad_where (fixP->fx_file, fixP->fx_line,
24939 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
24940 (unsigned long) addend_abs);
24941
24942 /* Extract the instruction. */
24943 insn = md_chars_to_number (buf, INSN_SIZE);
24944
24945 /* If the addend is negative, clear bit 23 of the instruction.
24946 Otherwise set it. */
24947 if (value < 0)
24948 insn &= ~(1 << 23);
24949 else
24950 insn |= 1 << 23;
24951
24952 /* Place the first four bits of the absolute value of the addend
24953 into the first 4 bits of the instruction, and the remaining
24954 four into bits 8 .. 11. */
24955 insn &= 0xfffff0f0;
24956 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
24957
24958 /* Update the instruction. */
24959 md_number_to_chars (buf, insn, INSN_SIZE);
24960 }
24961 break;
24962
24963 case BFD_RELOC_ARM_LDC_PC_G0:
24964 case BFD_RELOC_ARM_LDC_PC_G1:
24965 case BFD_RELOC_ARM_LDC_PC_G2:
24966 case BFD_RELOC_ARM_LDC_SB_G0:
24967 case BFD_RELOC_ARM_LDC_SB_G1:
24968 case BFD_RELOC_ARM_LDC_SB_G2:
24969 gas_assert (!fixP->fx_done);
24970 if (!seg->use_rela_p)
24971 {
24972 bfd_vma insn;
24973 bfd_vma addend_abs = llabs (value);
24974
24975 /* Check that the absolute value of the addend is a multiple of
24976 four and, when divided by four, fits in 8 bits. */
24977 if (addend_abs & 0x3)
24978 as_bad_where (fixP->fx_file, fixP->fx_line,
24979 _("bad offset 0x%08lX (must be word-aligned)"),
24980 (unsigned long) addend_abs);
24981
24982 if ((addend_abs >> 2) > 0xff)
24983 as_bad_where (fixP->fx_file, fixP->fx_line,
24984 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24985 (unsigned long) addend_abs);
24986
24987 /* Extract the instruction. */
24988 insn = md_chars_to_number (buf, INSN_SIZE);
24989
24990 /* If the addend is negative, clear bit 23 of the instruction.
24991 Otherwise set it. */
24992 if (value < 0)
24993 insn &= ~(1 << 23);
24994 else
24995 insn |= 1 << 23;
24996
24997 /* Place the addend (divided by four) into the first eight
24998 bits of the instruction. */
24999 insn &= 0xfffffff0;
25000 insn |= addend_abs >> 2;
25001
25002 /* Update the instruction. */
25003 md_number_to_chars (buf, insn, INSN_SIZE);
25004 }
25005 break;
25006
25007 case BFD_RELOC_THUMB_PCREL_BRANCH5:
25008 if (fixP->fx_addsy
25009 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25010 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25011 && ARM_IS_FUNC (fixP->fx_addsy)
25012 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
25013 {
25014 /* Force a relocation for a branch 5 bits wide. */
25015 fixP->fx_done = 0;
25016 }
25017 if (v8_1_branch_value_check (value, 5, FALSE) == FAIL)
25018 as_bad_where (fixP->fx_file, fixP->fx_line,
25019 BAD_BRANCH_OFF);
25020
25021 if (fixP->fx_done || !seg->use_rela_p)
25022 {
25023 addressT boff = value >> 1;
25024
25025 newval = md_chars_to_number (buf, THUMB_SIZE);
25026 newval |= (boff << 7);
25027 md_number_to_chars (buf, newval, THUMB_SIZE);
25028 }
25029 break;
25030
25031 case BFD_RELOC_THUMB_PCREL_BFCSEL:
25032 if (fixP->fx_addsy
25033 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25034 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25035 && ARM_IS_FUNC (fixP->fx_addsy)
25036 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
25037 {
25038 fixP->fx_done = 0;
25039 }
25040 if ((value & ~0x7f) && ((value & ~0x3f) != ~0x3f))
25041 as_bad_where (fixP->fx_file, fixP->fx_line,
25042 _("branch out of range"));
25043
25044 if (fixP->fx_done || !seg->use_rela_p)
25045 {
25046 newval = md_chars_to_number (buf, THUMB_SIZE);
25047
25048 addressT boff = ((newval & 0x0780) >> 7) << 1;
25049 addressT diff = value - boff;
25050
25051 if (diff == 4)
25052 {
25053 newval |= 1 << 1; /* T bit. */
25054 }
25055 else if (diff != 2)
25056 {
25057 as_bad_where (fixP->fx_file, fixP->fx_line,
25058 _("out of range label-relative fixup value"));
25059 }
25060 md_number_to_chars (buf, newval, THUMB_SIZE);
25061 }
25062 break;
25063
25064 case BFD_RELOC_ARM_THUMB_BF17:
25065 if (fixP->fx_addsy
25066 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25067 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25068 && ARM_IS_FUNC (fixP->fx_addsy)
25069 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
25070 {
25071 /* Force a relocation for a branch 17 bits wide. */
25072 fixP->fx_done = 0;
25073 }
25074
25075 if (v8_1_branch_value_check (value, 17, TRUE) == FAIL)
25076 as_bad_where (fixP->fx_file, fixP->fx_line,
25077 BAD_BRANCH_OFF);
25078
25079 if (fixP->fx_done || !seg->use_rela_p)
25080 {
25081 offsetT newval2;
25082 addressT immA, immB, immC;
25083
25084 immA = (value & 0x0001f000) >> 12;
25085 immB = (value & 0x00000ffc) >> 2;
25086 immC = (value & 0x00000002) >> 1;
25087
25088 newval = md_chars_to_number (buf, THUMB_SIZE);
25089 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25090 newval |= immA;
25091 newval2 |= (immC << 11) | (immB << 1);
25092 md_number_to_chars (buf, newval, THUMB_SIZE);
25093 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
25094 }
25095 break;
25096
25097 case BFD_RELOC_ARM_THUMB_BF19:
25098 if (fixP->fx_addsy
25099 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25100 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25101 && ARM_IS_FUNC (fixP->fx_addsy)
25102 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
25103 {
25104 /* Force a relocation for a branch 19 bits wide. */
25105 fixP->fx_done = 0;
25106 }
25107
25108 if (v8_1_branch_value_check (value, 19, TRUE) == FAIL)
25109 as_bad_where (fixP->fx_file, fixP->fx_line,
25110 BAD_BRANCH_OFF);
25111
25112 if (fixP->fx_done || !seg->use_rela_p)
25113 {
25114 offsetT newval2;
25115 addressT immA, immB, immC;
25116
25117 immA = (value & 0x0007f000) >> 12;
25118 immB = (value & 0x00000ffc) >> 2;
25119 immC = (value & 0x00000002) >> 1;
25120
25121 newval = md_chars_to_number (buf, THUMB_SIZE);
25122 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25123 newval |= immA;
25124 newval2 |= (immC << 11) | (immB << 1);
25125 md_number_to_chars (buf, newval, THUMB_SIZE);
25126 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
25127 }
25128 break;
25129
25130 case BFD_RELOC_ARM_THUMB_BF13:
25131 if (fixP->fx_addsy
25132 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25133 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25134 && ARM_IS_FUNC (fixP->fx_addsy)
25135 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
25136 {
25137 /* Force a relocation for a branch 13 bits wide. */
25138 fixP->fx_done = 0;
25139 }
25140
25141 if (v8_1_branch_value_check (value, 13, TRUE) == FAIL)
25142 as_bad_where (fixP->fx_file, fixP->fx_line,
25143 BAD_BRANCH_OFF);
25144
25145 if (fixP->fx_done || !seg->use_rela_p)
25146 {
25147 offsetT newval2;
25148 addressT immA, immB, immC;
25149
25150 immA = (value & 0x00001000) >> 12;
25151 immB = (value & 0x00000ffc) >> 2;
25152 immC = (value & 0x00000002) >> 1;
25153
25154 newval = md_chars_to_number (buf, THUMB_SIZE);
25155 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25156 newval |= immA;
25157 newval2 |= (immC << 11) | (immB << 1);
25158 md_number_to_chars (buf, newval, THUMB_SIZE);
25159 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
25160 }
25161 break;
25162
25163 case BFD_RELOC_ARM_THUMB_LOOP12:
25164 if (fixP->fx_addsy
25165 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25166 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25167 && ARM_IS_FUNC (fixP->fx_addsy)
25168 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
25169 {
25170 /* Force a relocation for a branch 12 bits wide. */
25171 fixP->fx_done = 0;
25172 }
25173
25174 bfd_vma insn = get_thumb32_insn (buf);
25175 /* le lr, <label> or le <label> */
25176 if (((insn & 0xffffffff) == 0xf00fc001)
25177 || ((insn & 0xffffffff) == 0xf02fc001))
25178 value = -value;
25179
25180 if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
25181 as_bad_where (fixP->fx_file, fixP->fx_line,
25182 BAD_BRANCH_OFF);
25183 if (fixP->fx_done || !seg->use_rela_p)
25184 {
25185 addressT imml, immh;
25186
25187 immh = (value & 0x00000ffc) >> 2;
25188 imml = (value & 0x00000002) >> 1;
25189
25190 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25191 newval |= (imml << 11) | (immh << 1);
25192 md_number_to_chars (buf + THUMB_SIZE, newval, THUMB_SIZE);
25193 }
25194 break;
25195
25196 case BFD_RELOC_ARM_V4BX:
25197 /* This will need to go in the object file. */
25198 fixP->fx_done = 0;
25199 break;
25200
25201 case BFD_RELOC_UNUSED:
25202 default:
25203 as_bad_where (fixP->fx_file, fixP->fx_line,
25204 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
25205 }
25206 }
25207
25208 /* Translate internal representation of relocation info to BFD target
25209 format. */
25210
25211 arelent *
25212 tc_gen_reloc (asection *section, fixS *fixp)
25213 {
25214 arelent * reloc;
25215 bfd_reloc_code_real_type code;
25216
25217 reloc = XNEW (arelent);
25218
25219 reloc->sym_ptr_ptr = XNEW (asymbol *);
25220 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
25221 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
25222
25223 if (fixp->fx_pcrel)
25224 {
25225 if (section->use_rela_p)
25226 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
25227 else
25228 fixp->fx_offset = reloc->address;
25229 }
25230 reloc->addend = fixp->fx_offset;
25231
25232 switch (fixp->fx_r_type)
25233 {
25234 case BFD_RELOC_8:
25235 if (fixp->fx_pcrel)
25236 {
25237 code = BFD_RELOC_8_PCREL;
25238 break;
25239 }
25240 /* Fall through. */
25241
25242 case BFD_RELOC_16:
25243 if (fixp->fx_pcrel)
25244 {
25245 code = BFD_RELOC_16_PCREL;
25246 break;
25247 }
25248 /* Fall through. */
25249
25250 case BFD_RELOC_32:
25251 if (fixp->fx_pcrel)
25252 {
25253 code = BFD_RELOC_32_PCREL;
25254 break;
25255 }
25256 /* Fall through. */
25257
25258 case BFD_RELOC_ARM_MOVW:
25259 if (fixp->fx_pcrel)
25260 {
25261 code = BFD_RELOC_ARM_MOVW_PCREL;
25262 break;
25263 }
25264 /* Fall through. */
25265
25266 case BFD_RELOC_ARM_MOVT:
25267 if (fixp->fx_pcrel)
25268 {
25269 code = BFD_RELOC_ARM_MOVT_PCREL;
25270 break;
25271 }
25272 /* Fall through. */
25273
25274 case BFD_RELOC_ARM_THUMB_MOVW:
25275 if (fixp->fx_pcrel)
25276 {
25277 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
25278 break;
25279 }
25280 /* Fall through. */
25281
25282 case BFD_RELOC_ARM_THUMB_MOVT:
25283 if (fixp->fx_pcrel)
25284 {
25285 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
25286 break;
25287 }
25288 /* Fall through. */
25289
25290 case BFD_RELOC_NONE:
25291 case BFD_RELOC_ARM_PCREL_BRANCH:
25292 case BFD_RELOC_ARM_PCREL_BLX:
25293 case BFD_RELOC_RVA:
25294 case BFD_RELOC_THUMB_PCREL_BRANCH7:
25295 case BFD_RELOC_THUMB_PCREL_BRANCH9:
25296 case BFD_RELOC_THUMB_PCREL_BRANCH12:
25297 case BFD_RELOC_THUMB_PCREL_BRANCH20:
25298 case BFD_RELOC_THUMB_PCREL_BRANCH23:
25299 case BFD_RELOC_THUMB_PCREL_BRANCH25:
25300 case BFD_RELOC_VTABLE_ENTRY:
25301 case BFD_RELOC_VTABLE_INHERIT:
25302 #ifdef TE_PE
25303 case BFD_RELOC_32_SECREL:
25304 #endif
25305 code = fixp->fx_r_type;
25306 break;
25307
25308 case BFD_RELOC_THUMB_PCREL_BLX:
25309 #ifdef OBJ_ELF
25310 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
25311 code = BFD_RELOC_THUMB_PCREL_BRANCH23;
25312 else
25313 #endif
25314 code = BFD_RELOC_THUMB_PCREL_BLX;
25315 break;
25316
25317 case BFD_RELOC_ARM_LITERAL:
25318 case BFD_RELOC_ARM_HWLITERAL:
25319 /* If this is called then a literal has
25320 been referenced across a section boundary. */
25321 as_bad_where (fixp->fx_file, fixp->fx_line,
25322 _("literal referenced across section boundary"));
25323 return NULL;
25324
25325 #ifdef OBJ_ELF
25326 case BFD_RELOC_ARM_TLS_CALL:
25327 case BFD_RELOC_ARM_THM_TLS_CALL:
25328 case BFD_RELOC_ARM_TLS_DESCSEQ:
25329 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
25330 case BFD_RELOC_ARM_GOT32:
25331 case BFD_RELOC_ARM_GOTOFF:
25332 case BFD_RELOC_ARM_GOT_PREL:
25333 case BFD_RELOC_ARM_PLT32:
25334 case BFD_RELOC_ARM_TARGET1:
25335 case BFD_RELOC_ARM_ROSEGREL32:
25336 case BFD_RELOC_ARM_SBREL32:
25337 case BFD_RELOC_ARM_PREL31:
25338 case BFD_RELOC_ARM_TARGET2:
25339 case BFD_RELOC_ARM_TLS_LDO32:
25340 case BFD_RELOC_ARM_PCREL_CALL:
25341 case BFD_RELOC_ARM_PCREL_JUMP:
25342 case BFD_RELOC_ARM_ALU_PC_G0_NC:
25343 case BFD_RELOC_ARM_ALU_PC_G0:
25344 case BFD_RELOC_ARM_ALU_PC_G1_NC:
25345 case BFD_RELOC_ARM_ALU_PC_G1:
25346 case BFD_RELOC_ARM_ALU_PC_G2:
25347 case BFD_RELOC_ARM_LDR_PC_G0:
25348 case BFD_RELOC_ARM_LDR_PC_G1:
25349 case BFD_RELOC_ARM_LDR_PC_G2:
25350 case BFD_RELOC_ARM_LDRS_PC_G0:
25351 case BFD_RELOC_ARM_LDRS_PC_G1:
25352 case BFD_RELOC_ARM_LDRS_PC_G2:
25353 case BFD_RELOC_ARM_LDC_PC_G0:
25354 case BFD_RELOC_ARM_LDC_PC_G1:
25355 case BFD_RELOC_ARM_LDC_PC_G2:
25356 case BFD_RELOC_ARM_ALU_SB_G0_NC:
25357 case BFD_RELOC_ARM_ALU_SB_G0:
25358 case BFD_RELOC_ARM_ALU_SB_G1_NC:
25359 case BFD_RELOC_ARM_ALU_SB_G1:
25360 case BFD_RELOC_ARM_ALU_SB_G2:
25361 case BFD_RELOC_ARM_LDR_SB_G0:
25362 case BFD_RELOC_ARM_LDR_SB_G1:
25363 case BFD_RELOC_ARM_LDR_SB_G2:
25364 case BFD_RELOC_ARM_LDRS_SB_G0:
25365 case BFD_RELOC_ARM_LDRS_SB_G1:
25366 case BFD_RELOC_ARM_LDRS_SB_G2:
25367 case BFD_RELOC_ARM_LDC_SB_G0:
25368 case BFD_RELOC_ARM_LDC_SB_G1:
25369 case BFD_RELOC_ARM_LDC_SB_G2:
25370 case BFD_RELOC_ARM_V4BX:
25371 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
25372 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
25373 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
25374 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
25375 case BFD_RELOC_ARM_GOTFUNCDESC:
25376 case BFD_RELOC_ARM_GOTOFFFUNCDESC:
25377 case BFD_RELOC_ARM_FUNCDESC:
25378 case BFD_RELOC_ARM_THUMB_BF17:
25379 case BFD_RELOC_ARM_THUMB_BF19:
25380 case BFD_RELOC_ARM_THUMB_BF13:
25381 code = fixp->fx_r_type;
25382 break;
25383
25384 case BFD_RELOC_ARM_TLS_GOTDESC:
25385 case BFD_RELOC_ARM_TLS_GD32:
25386 case BFD_RELOC_ARM_TLS_GD32_FDPIC:
25387 case BFD_RELOC_ARM_TLS_LE32:
25388 case BFD_RELOC_ARM_TLS_IE32:
25389 case BFD_RELOC_ARM_TLS_IE32_FDPIC:
25390 case BFD_RELOC_ARM_TLS_LDM32:
25391 case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
25392 /* BFD will include the symbol's address in the addend.
25393 But we don't want that, so subtract it out again here. */
25394 if (!S_IS_COMMON (fixp->fx_addsy))
25395 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
25396 code = fixp->fx_r_type;
25397 break;
25398 #endif
25399
25400 case BFD_RELOC_ARM_IMMEDIATE:
25401 as_bad_where (fixp->fx_file, fixp->fx_line,
25402 _("internal relocation (type: IMMEDIATE) not fixed up"));
25403 return NULL;
25404
25405 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
25406 as_bad_where (fixp->fx_file, fixp->fx_line,
25407 _("ADRL used for a symbol not defined in the same file"));
25408 return NULL;
25409
25410 case BFD_RELOC_THUMB_PCREL_BRANCH5:
25411 case BFD_RELOC_THUMB_PCREL_BFCSEL:
25412 case BFD_RELOC_ARM_THUMB_LOOP12:
25413 as_bad_where (fixp->fx_file, fixp->fx_line,
25414 _("%s used for a symbol not defined in the same file"),
25415 bfd_get_reloc_code_name (fixp->fx_r_type));
25416 return NULL;
25417
25418 case BFD_RELOC_ARM_OFFSET_IMM:
25419 if (section->use_rela_p)
25420 {
25421 code = fixp->fx_r_type;
25422 break;
25423 }
25424
25425 if (fixp->fx_addsy != NULL
25426 && !S_IS_DEFINED (fixp->fx_addsy)
25427 && S_IS_LOCAL (fixp->fx_addsy))
25428 {
25429 as_bad_where (fixp->fx_file, fixp->fx_line,
25430 _("undefined local label `%s'"),
25431 S_GET_NAME (fixp->fx_addsy));
25432 return NULL;
25433 }
25434
25435 as_bad_where (fixp->fx_file, fixp->fx_line,
25436 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
25437 return NULL;
25438
25439 default:
25440 {
25441 const char * type;
25442
25443 switch (fixp->fx_r_type)
25444 {
25445 case BFD_RELOC_NONE: type = "NONE"; break;
25446 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
25447 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
25448 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
25449 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
25450 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
25451 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
25452 case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
25453 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
25454 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
25455 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
25456 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
25457 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
25458 default: type = _("<unknown>"); break;
25459 }
25460 as_bad_where (fixp->fx_file, fixp->fx_line,
25461 _("cannot represent %s relocation in this object file format"),
25462 type);
25463 return NULL;
25464 }
25465 }
25466
25467 #ifdef OBJ_ELF
25468 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
25469 && GOT_symbol
25470 && fixp->fx_addsy == GOT_symbol)
25471 {
25472 code = BFD_RELOC_ARM_GOTPC;
25473 reloc->addend = fixp->fx_offset = reloc->address;
25474 }
25475 #endif
25476
25477 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
25478
25479 if (reloc->howto == NULL)
25480 {
25481 as_bad_where (fixp->fx_file, fixp->fx_line,
25482 _("cannot represent %s relocation in this object file format"),
25483 bfd_get_reloc_code_name (code));
25484 return NULL;
25485 }
25486
25487 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
25488 vtable entry to be used in the relocation's section offset. */
25489 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
25490 reloc->address = fixp->fx_offset;
25491
25492 return reloc;
25493 }
25494
25495 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
25496
25497 void
25498 cons_fix_new_arm (fragS * frag,
25499 int where,
25500 int size,
25501 expressionS * exp,
25502 bfd_reloc_code_real_type reloc)
25503 {
25504 int pcrel = 0;
25505
25506 /* Pick a reloc.
25507 FIXME: @@ Should look at CPU word size. */
25508 switch (size)
25509 {
25510 case 1:
25511 reloc = BFD_RELOC_8;
25512 break;
25513 case 2:
25514 reloc = BFD_RELOC_16;
25515 break;
25516 case 4:
25517 default:
25518 reloc = BFD_RELOC_32;
25519 break;
25520 case 8:
25521 reloc = BFD_RELOC_64;
25522 break;
25523 }
25524
25525 #ifdef TE_PE
25526 if (exp->X_op == O_secrel)
25527 {
25528 exp->X_op = O_symbol;
25529 reloc = BFD_RELOC_32_SECREL;
25530 }
25531 #endif
25532
25533 fix_new_exp (frag, where, size, exp, pcrel, reloc);
25534 }
25535
#if defined (OBJ_COFF)
/* COFF hook run on each fixup before it is applied.  Redirects Thumb
   BL-style branches aimed at non-Thumb functions to the function's
   Thumb entry point (found by find_real_start).  */
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.	*/
  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
      && fixP->fx_addsy != NULL
      && S_IS_DEFINED (fixP->fx_addsy)
      && ! THUMB_IS_FUNC (fixP->fx_addsy))
    {
      fixP->fx_addsy = find_real_start (fixP->fx_addsy);
    }
}
#endif
25553
25554
25555 int
25556 arm_force_relocation (struct fix * fixp)
25557 {
25558 #if defined (OBJ_COFF) && defined (TE_PE)
25559 if (fixp->fx_r_type == BFD_RELOC_RVA)
25560 return 1;
25561 #endif
25562
25563 /* In case we have a call or a branch to a function in ARM ISA mode from
25564 a thumb function or vice-versa force the relocation. These relocations
25565 are cleared off for some cores that might have blx and simple transformations
25566 are possible. */
25567
25568 #ifdef OBJ_ELF
25569 switch (fixp->fx_r_type)
25570 {
25571 case BFD_RELOC_ARM_PCREL_JUMP:
25572 case BFD_RELOC_ARM_PCREL_CALL:
25573 case BFD_RELOC_THUMB_PCREL_BLX:
25574 if (THUMB_IS_FUNC (fixp->fx_addsy))
25575 return 1;
25576 break;
25577
25578 case BFD_RELOC_ARM_PCREL_BLX:
25579 case BFD_RELOC_THUMB_PCREL_BRANCH25:
25580 case BFD_RELOC_THUMB_PCREL_BRANCH20:
25581 case BFD_RELOC_THUMB_PCREL_BRANCH23:
25582 if (ARM_IS_FUNC (fixp->fx_addsy))
25583 return 1;
25584 break;
25585
25586 default:
25587 break;
25588 }
25589 #endif
25590
25591 /* Resolve these relocations even if the symbol is extern or weak.
25592 Technically this is probably wrong due to symbol preemption.
25593 In practice these relocations do not have enough range to be useful
25594 at dynamic link time, and some code (e.g. in the Linux kernel)
25595 expects these references to be resolved. */
25596 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
25597 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
25598 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
25599 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
25600 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
25601 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
25602 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
25603 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
25604 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
25605 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
25606 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
25607 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
25608 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
25609 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
25610 return 0;
25611
25612 /* Always leave these relocations for the linker. */
25613 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
25614 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
25615 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
25616 return 1;
25617
25618 /* Always generate relocations against function symbols. */
25619 if (fixp->fx_r_type == BFD_RELOC_32
25620 && fixp->fx_addsy
25621 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
25622 return 1;
25623
25624 return generic_force_reloc (fixp);
25625 }
25626
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   FIXME:
   There is one other problem that ought to be addressed here, but
   which currently is not: Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.	 */

bfd_boolean
arm_fix_adjustable (fixS * fixP)
{
  /* No symbol at all: nothing to preserve.  */
  if (fixP->fx_addsy == NULL)
    return 1;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return FALSE;

  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return FALSE;

  switch (fixP->fx_r_type)
    {
    /* We need the symbol name for the VTABLE entries.	*/
    case BFD_RELOC_VTABLE_INHERIT:
    case BFD_RELOC_VTABLE_ENTRY:

    /* Don't allow symbols to be discarded on GOT related relocs.  */
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_GD32_FDPIC:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_IE32_FDPIC:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_TARGET2:

    /* MOVW/MOVT REL relocations have limited offsets, so keep the
       symbols.	 */
    case BFD_RELOC_ARM_MOVW:
    case BFD_RELOC_ARM_MOVT:
    case BFD_RELOC_ARM_MOVW_PCREL:
    case BFD_RELOC_ARM_MOVT_PCREL:
    case BFD_RELOC_ARM_THUMB_MOVW:
    case BFD_RELOC_ARM_THUMB_MOVT:
    case BFD_RELOC_ARM_THUMB_MOVW_PCREL:
    case BFD_RELOC_ARM_THUMB_MOVT_PCREL:
      return FALSE;

    default:
      break;
    }

  /* Similarly for group relocations.  BFD_RELOC_ARM_LDR_PC_G0 lies
     outside the contiguous range, hence the extra test.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return FALSE;

  /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
     offsets, so keep these symbols.  */
  if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
      && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
    return FALSE;

  return TRUE;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
25706
25707 #ifdef OBJ_ELF
25708 const char *
25709 elf32_arm_target_format (void)
25710 {
25711 #ifdef TE_SYMBIAN
25712 return (target_big_endian
25713 ? "elf32-bigarm-symbian"
25714 : "elf32-littlearm-symbian");
25715 #elif defined (TE_VXWORKS)
25716 return (target_big_endian
25717 ? "elf32-bigarm-vxworks"
25718 : "elf32-littlearm-vxworks");
25719 #elif defined (TE_NACL)
25720 return (target_big_endian
25721 ? "elf32-bigarm-nacl"
25722 : "elf32-littlearm-nacl");
25723 #else
25724 if (arm_fdpic)
25725 {
25726 if (target_big_endian)
25727 return "elf32-bigarm-fdpic";
25728 else
25729 return "elf32-littlearm-fdpic";
25730 }
25731 else
25732 {
25733 if (target_big_endian)
25734 return "elf32-bigarm";
25735 else
25736 return "elf32-littlearm";
25737 }
25738 #endif
25739 }
25740
/* ELF symbol frobbing hook for ARM: simply defer to the generic ELF
   implementation.  */
void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  elf_frob_symbol (symp, puntp);
}
25747 #endif
25748
25749 /* MD interface: Finalization. */
25750
25751 void
25752 arm_cleanup (void)
25753 {
25754 literal_pool * pool;
25755
25756 /* Ensure that all the IT blocks are properly closed. */
25757 check_it_blocks_finished ();
25758
25759 for (pool = list_of_pools; pool; pool = pool->next)
25760 {
25761 /* Put it at the end of the relevant section. */
25762 subseg_set (pool->section, pool->sub_section);
25763 #ifdef OBJ_ELF
25764 arm_elf_change_section ();
25765 #endif
25766 s_ltorg (0);
25767 }
25768 }
25769
#ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.	 We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called once per section via bfd_map_over_sections;
   ABFD and DUMMY are unused.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* Only the last mapping symbol of a frag can be redundant.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.	*/
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.	 */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward over empty frags to decide whether SYM is made
	 redundant by a later mapping symbol (or the section end).  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
#endif
25836
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  For COFF this is done through the symbol storage class;
   for ELF through the symbol's branch-type / st_info encoding.	 */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.	 */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    /* Non-function Thumb symbols: map each storage class to its
	       Thumb-specific counterpart.  */
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a, $t, $d, ...) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
25918
25919 /* MD interface: Initialization. */
25920
25921 static void
25922 set_constant_flonums (void)
25923 {
25924 int i;
25925
25926 for (i = 0; i < NUM_FLOAT_VALS; i++)
25927 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
25928 abort ();
25929 }
25930
/* Auto-select Thumb mode if it's the only available instruction set for the
   given architecture.	*/

static void
autoselect_thumb_from_cpu_variant (void)
{
  /* NOTE(review): presumably arm_ext_v1 denotes the base ARM (A32)
     instruction set; when it is absent (e.g. M-profile cores) only
     Thumb encodings exist, so force 16-bit opcode selection.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
25940
/* MD interface: one-time assembler start-up.  Builds the parsing hash
   tables, resolves the CPU/FPU selection from the command line, sets
   the object file's private flags and records the BFD machine type.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the hash tables used by the parser: opcodes, condition
     codes, shift names, PSR names (ARM and v7-M), register names,
     relocation names and barrier options.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate each table from its static definition array.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.	 */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options together with new options is faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      selected_arch = *legacy_cpu;
    }
  else if (mcpu_cpu_opt)
    {
      selected_arch = *mcpu_cpu_opt;
      selected_ext = *mcpu_ext_opt;
    }
  else if (march_cpu_opt)
    {
      selected_arch = *march_cpu_opt;
      selected_ext = *march_ext_opt;
    }
  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      selected_fpu = *legacy_fpu;
    }
  else if (mfpu_opt)
    selected_fpu = *mfpu_opt;
  else
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	selected_fpu = *mcpu_fpu_opt;
      else if (march_fpu_opt)
	selected_fpu = *march_fpu_opt;
#else
      selected_fpu = fpu_default;
#endif
    }

  /* Last-resort FPU defaults when nothing selected one above.  */
  if (ARM_FEATURE_ZERO (selected_fpu))
    {
      if (!no_cpu_selected ())
	selected_fpu = fpu_default;
      else
	selected_fpu = fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (ARM_FEATURE_ZERO (selected_arch))
    {
      selected_arch = cpu_default;
      selected_cpu = selected_arch;
    }
  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#else
  /* Autodetection of feature mode: allow all features in cpu_variant but
     leave selected_cpu unset.	It will be set in
     aeabi_set_public_attributes () after all instructions have been
     processed and we can decide what CPU should be selected.  */
  if (ARM_FEATURE_ZERO (selected_arch))
    ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
  else
    ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#endif

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  (For ELF this code is
	   reached, via fallthrough from the #if above, only when the EABI
	   version is unknown.)	 */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	    flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well: pick the most specific machine value
     the selected feature set supports.	 */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
26177
26178 /* Command line processing. */
26179
26180 /* md_parse_option
26181 Invocation line includes a switch not recognized by the base assembler.
26182 See if it's a processor-specific option.
26183
26184 This routine is somewhat complicated by the need for backwards
26185 compatibility (since older releases of gcc can't be changed).
26186 The new options try to make the interface as compatible as
26187 possible with GCC.
26188
26189 New options (supported) are:
26190
26191 -mcpu=<cpu name> Assemble for selected processor
26192 -march=<architecture name> Assemble for selected architecture
26193 -mfpu=<fpu architecture> Assemble for selected FPU.
26194 -EB/-mbig-endian Big-endian
26195 -EL/-mlittle-endian Little-endian
26196 -k Generate PIC code
26197 -mthumb Start in Thumb mode
26198 -mthumb-interwork Code supports ARM/Thumb interworking
26199
26200 -m[no-]warn-deprecated Warn about deprecated features
26201 -m[no-]warn-syms Warn when symbols match instructions
26202
26203 For now we will also provide support for:
26204
26205 -mapcs-32 32-bit Program counter
26206 -mapcs-26 26-bit Program counter
   -mapcs-float		Floats passed in FP registers
26208 -mapcs-reentrant Reentrant code
26209 -matpcs
26210 (sometime these will probably be replaced with -mapcs=<list of options>
26211 and -matpcs=<list of options>)
26212
   The remaining options are only supported for backwards compatibility.
26214 Cpu variants, the arm part is optional:
26215 -m[arm]1 Currently not supported.
26216 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
26217 -m[arm]3 Arm 3 processor
26218 -m[arm]6[xx], Arm 6 processors
26219 -m[arm]7[xx][t][[d]m] Arm 7 processors
26220 -m[arm]8[10] Arm 8 processors
26221 -m[arm]9[20][tdmi] Arm 9 processors
26222 -mstrongarm[110[0]] StrongARM processors
26223 -mxscale XScale processors
26224 -m[arm]v[2345[t[e]]] Arm architectures
26225 -mall All (except the ARM1)
26226 FP variants:
26227 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
26228 -mfpe-old (No float load/store multiples)
26229 -mvfpxd VFP Single precision
26230 -mvfp All VFP
26231 -mno-fpu Disable all floating point instructions
26232
26233 The following CPU names are recognized:
26234 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
26235 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
26236 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
26237 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
26238 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
26239 arm10t arm10e, arm1020t, arm1020e, arm10200e,
26240 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
26241
26242 */
26243
/* Short options: "-m" takes an argument (CPU/feature selection), "-k"
   requests PIC code.  */
const char * md_shortopts = "m:k";

/* Option codes for the long options below.  An endianness switch is
   only defined when the target can actually be assembled in that
   endianness.	*/
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
#define OPTION_FDPIC (OPTION_MD_BASE + 3)
26258
/* Long options recognized by the ARM backend (getopt_long table,
   terminated by a null entry).	 */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
#ifdef OBJ_ELF
  {"fdpic", no_argument, NULL, OPTION_FDPIC},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
26275
/* Description of a simple "-m..." option that just sets an integer
   variable.  Used by the arm_opts[] table below.  */
struct arm_option_table
{
  const char * option;		/* Option name to match.  */
  const char * help;		/* Help information.  */
  int *	       var;		/* Variable to change.	*/
  int	       value;		/* What to change it to.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
26284
26285 struct arm_option_table arm_opts[] =
26286 {
26287 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
26288 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
26289 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
26290 &support_interwork, 1, NULL},
26291 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
26292 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
26293 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
26294 1, NULL},
26295 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
26296 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
26297 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
26298 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
26299 NULL},
26300
26301 /* These are recognized by the assembler, but have no affect on code. */
26302 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
26303 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
26304
26305 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
26306 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
26307 &warn_on_deprecated, 0, NULL},
26308 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
26309 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
26310 {NULL, NULL, NULL, 0, NULL}
26311 };
26312
/* Description of a deprecated "-m<cpu>" style option that selects a
   whole feature set.  Used by the arm_legacy_opts[] table below.  */
struct arm_legacy_option_table
{
  const char *		    option;	/* Option name to match.  */
  const arm_feature_set **  var;	/* Variable to change.	*/
  const arm_feature_set	    value;	/* What to change it to.  */
  const char *		    deprecated; /* If non-null, print this message.  */
};
26320
/* Deprecated single-dash spellings of CPU, architecture and FPU
   selections, kept only for backwards compatibility.  Each entry maps
   onto legacy_cpu/legacy_fpu and names the preferred modern option.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
26433
/* Table entry describing a CPU selectable with -mcpu=.  */
struct arm_cpu_option_table
{
  const char *	name;		/* CPU name as accepted on the command line.  */
  size_t	name_len;	/* Length of NAME, precomputed for matching.  */
  const arm_feature_set	value;	/* Architecture feature bits for this CPU.  */
  const arm_feature_set	ext;	/* CPU-specific extension feature bits.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *	canonical_name;
};
26447
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* Helper for the table below.  N = -mcpu= name, CN = canonical name
   (NULL means upper-cased N), V = architecture feature bits,
   E = CPU-specific extension bits, DF = FPU assumed when no -mfpu=.  */
#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }

static const struct arm_cpu_option_table arm_cpus[] =
{
  ARM_CPU_OPT ("all",		  NULL,		       ARM_ANY,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm1",		  NULL,		       ARM_ARCH_V1,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm2",		  NULL,		       ARM_ARCH_V2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm250",	  NULL,		       ARM_ARCH_V2S,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm3",		  NULL,		       ARM_ARCH_V2S,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm6",		  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm60",		  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm600",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm610",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm620",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7",		  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7m",		  NULL,		       ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7d",		  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dm",	  NULL,		       ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7di",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dmi",	  NULL,		       ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm70",		  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700i",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710t",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720t",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm740t",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710c",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7100",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500fe",	  NULL,		       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7t",		  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi-s",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm8",		  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm810",	  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm",	  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1",	  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm110",	  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1100",	  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1110",	  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9",		  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920",	  "ARM920T",	       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920t",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm922t",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm940t",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9tdmi",	  NULL,		       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa526",		  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa626",		  NULL,		       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),

  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0",	  NULL,		       ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm9e",		  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej",	  "ARM926EJ-S",	       ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ejs",	  "ARM926EJ-S",	       ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej-s",	  NULL,		       ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-r0",	  NULL,		       ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e",	  "ARM946E-S",	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-s",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-r0",	  NULL,		       ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e",	  "ARM966E-S",	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-s",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm968e-s",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm10t",	  NULL,		       ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10tdmi",	  NULL,		       ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10e",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020",	  "ARM1020E",	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020t",	  NULL,		       ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm1020e",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1022e",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ejs",	  "ARM1026EJ-S",       ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ej-s",	  NULL,		       ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa606te",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa616te",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa626te",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fmp626",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa726te",	  NULL,		       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136js",	  "ARM1136J-S",	       ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1136j-s",	  NULL,		       ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1136jfs",	  "ARM1136JF-S",       ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136jf-s",	  NULL,		       ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcore",	  "MPCore",	       ARM_ARCH_V6K,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcorenovfp",	  "MPCore",	       ARM_ARCH_V6K,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1156t2-s",	  NULL,		       ARM_ARCH_V6T2,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1156t2f-s",	  NULL,		       ARM_ARCH_V6T2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1176jz-s",	  NULL,		       ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1176jzf-s",	  NULL,		       ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("cortex-a5",	  "Cortex-A5",	       ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-a7",	  "Cortex-A7",	       ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a8",	  "Cortex-A8",	       ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a9",	  "Cortex-A9",	       ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a12",	  "Cortex-A12",	       ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a15",	  "Cortex-A15",	       ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a17",	  "Cortex-A17",	       ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a32",	  "Cortex-A32",	       ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a35",	  "Cortex-A35",	       ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a53",	  "Cortex-A53",	       ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a55",	  "Cortex-A55",	       ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a57",	  "Cortex-A57",	       ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a72",	  "Cortex-A72",	       ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a73",	  "Cortex-A73",	       ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a75",	  "Cortex-A75",	       ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a76",	  "Cortex-A76",	       ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("ares",    "Ares",	       ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-r4",	  "Cortex-R4",	       ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r4f",	  "Cortex-R4F",	       ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r5",	  "Cortex-R5",	       ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r7",	  "Cortex-R7",	       ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r8",	  "Cortex-R8",	       ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r52",	  "Cortex-R52",	       ARM_ARCH_V8R,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-m33",	  "Cortex-M33",	       ARM_ARCH_V8M_MAIN,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m23",	  "Cortex-M23",	       ARM_ARCH_V8M_BASE,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m7",	  "Cortex-M7",	       ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m4",	  "Cortex-M4",	       ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m3",	  "Cortex-M3",	       ARM_ARCH_V7M,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m1",	  "Cortex-M1",	       ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0",	  "Cortex-M0",	       ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0plus",	  "Cortex-M0+",	       ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("exynos-m1",	  "Samsung Exynos M1", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("neoverse-n1",	  "Neoverse N1",       ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale",	  NULL,		       ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt",	  NULL,		       ARM_ARCH_IWMMXT,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("iwmmxt2",	  NULL,		       ARM_ARCH_IWMMXT2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("i80200",	  NULL,		       ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* Maverick.  */
  ARM_CPU_OPT ("ep9312",	  "ARM920T",
	       ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
	       ARM_ARCH_NONE, FPU_ARCH_MAVERICK),

  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4",	  NULL,		       ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("marvell-whitney", NULL,		       ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_NEON_VFP_V4),

  /* APM X-Gene family.  */
  ARM_CPU_OPT ("xgene1",	  "APM X-Gene 1",      ARM_ARCH_V8A,
	       ARM_ARCH_NONE,
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("xgene2",	  "APM X-Gene 2",      ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
26842
/* An architecture extension accepted after a '+' in -march=.  MERGE
   holds the feature bits the extension enables; CLEAR holds the bits
   that the "no<name>" form removes.  */
struct arm_ext_table
{
  const char *		  name;		/* Extension name, without the '+'.  */
  size_t		  name_len;	/* Length of NAME, precomputed.  */
  const arm_feature_set	  merge;	/* Bits enabled by +NAME.  */
  const arm_feature_set	  clear;	/* Bits removed by +noNAME.  */
};
26850
/* Table entry describing an architecture selectable with -march=.  */
struct arm_arch_option_table
{
  const char *	name;		/* Architecture name as given to -march=.  */
  size_t	name_len;	/* Length of NAME, precomputed for matching.  */
  const arm_feature_set	value;	/* Feature bits implied by this architecture.  */
  const arm_feature_set	default_fpu;	/* FPU assumed when no -mfpu= is given.  */
  /* Optional table of +extension suffixes valid for this architecture,
     or NULL when none are accepted.  */
  const struct arm_ext_table * ext_table;
};
26859
/* Used to add support for +E and +noE extension.  E is the extension
   name, M the feature bits +E merges in, C the bits +noE clears.  */
#define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
/* Used to add support for a +E extension.  */
#define ARM_ADD(E, M) { E, sizeof (E) - 1, M, ARM_ARCH_NONE }
/* Used to add support for a +noE extension.  */
#define ARM_REMOVE(E, C) { E, sizeof (E) - 1, ARM_ARCH_NONE, C }

/* Feature set covering every floating-point variant: the FP16 scalar
   and FP16 multiply-accumulate core bits plus all FPU coprocessor bits
   except the pure-endianness marker.  Used as the CLEAR argument so
   that +nofp removes FP entirely.  */
#define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
			    ~0 & ~FPU_ENDIAN_PURE)
26869
/* Extensions accepted after -march=armv5te (and the v5TE-derived
   architectures that share this table).  */
static const struct arm_ext_table armv5te_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V2, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
26875
/* Extensions accepted after -march=armv7.  */
static const struct arm_ext_table armv7_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
26881
/* Extensions accepted after -march=armv7ve.  */
static const struct arm_ext_table armv7ve_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16), /* Alias for +fp.  */
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.  */
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
26904
/* Extensions accepted after -march=armv7-a (and armv7a).  */
static const struct arm_ext_table armv7a_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.  */
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.  */
  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),

  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP)),
  ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
26929
/* Extensions accepted after -march=armv7-r (and armv7r).  */
static const struct arm_ext_table armv7r_ext_table[] =
{
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD),
  ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD), /* Alias for +fp.sp.  */
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.  */
  ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
26942
/* Extensions accepted after -march=armv7e-m.  */
static const struct arm_ext_table armv7em_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16, ALL_FP),
  /* Alias for +fp, used to be known as fpv4-sp-d16.  */
  ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16),
  ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
26953
/* Extensions accepted after -march=armv8-a.  */
static const struct arm_ext_table armv8a_ext_table[] =
{
  ARM_ADD ("crc", ARCH_CRC_ARMV8),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
26968
26969
/* Extensions accepted after -march=armv8.1-a.  */
static const struct arm_ext_table armv81a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
26983
/* Extensions accepted after -march=armv8.2-a (also used for armv8.3-a).  */
static const struct arm_ext_table armv82a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16),
  ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27000
/* Extensions accepted after -march=armv8.4-a.  */
static const struct arm_ext_table armv84a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27015
/* Extensions accepted after -march=armv8.5-a.  */
static const struct arm_ext_table armv85a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27028
/* Extensions accepted after -march=armv8-m.main.  */
static const struct arm_ext_table armv8m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
  ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16, ALL_FP),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27037
/* Extensions accepted after -march=armv8.1-m.main.  */
static const struct arm_ext_table armv8_1m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
  ARM_EXT ("fp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA),
	   ALL_FP),
  ARM_ADD ("fp.dp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27051
/* Extensions accepted after -march=armv8-r.  */
static const struct arm_ext_table armv8r_ext_table[] =
{
  ARM_ADD ("crc", ARCH_CRC_ARMV8),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27062
27063 /* This list should, at a minimum, contain all the architecture names
27064 recognized by GCC. */
27065 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
27066 #define ARM_ARCH_OPT2(N, V, DF, ext) \
27067 { N, sizeof (N) - 1, V, DF, ext##_ext_table }
27068
27069 static const struct arm_arch_option_table arm_archs[] =
27070 {
27071 ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
27072 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
27073 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
27074 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
27075 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
27076 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
27077 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
27078 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
27079 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
27080 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
27081 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
27082 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
27083 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
27084 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
27085 ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP, armv5te),
27086 ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP, armv5te),
27087 ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP, armv5te),
27088 ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
27089 ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
27090 ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP, armv5te),
27091 ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP, armv5te),
27092 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
27093 kept to preserve existing behaviour. */
27094 ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
27095 ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
27096 ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP, armv5te),
27097 ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP, armv5te),
27098 ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP, armv5te),
27099 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
27100 kept to preserve existing behaviour. */
27101 ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
27102 ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
27103 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
27104 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
27105 ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP, armv7),
27106 /* The official spelling of the ARMv7 profile variants is the dashed form.
27107 Accept the non-dashed form for compatibility with old toolchains. */
27108 ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
27109 ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP, armv7ve),
27110 ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
27111 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
27112 ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
27113 ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
27114 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
27115 ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP, armv7em),
27116 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
27117 ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP,
27118 armv8m_main),
27119 ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN, FPU_ARCH_VFP,
27120 armv8_1m_main),
27121 ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP, armv8a),
27122 ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP, armv81a),
27123 ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP, armv82a),
27124 ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A, FPU_ARCH_VFP, armv82a),
27125 ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R, FPU_ARCH_VFP, armv8r),
27126 ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A, FPU_ARCH_VFP, armv84a),
27127 ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A, FPU_ARCH_VFP, armv85a),
27128 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
27129 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
27130 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2, FPU_ARCH_VFP),
27131 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
27132 };
27133 #undef ARM_ARCH_OPT
27134
27135 /* ISA extensions in the co-processor and main instruction set space. */
27136
/* Describes one legacy command-line architectural extension (the "+ext"
   suffix of -march/-mcpu) and the feature bits it adds or removes.  */
struct arm_option_extension_value_table
{
  const char * name;			/* Extension name, without the '+'.  */
  size_t name_len;			/* strlen (NAME), for prefix compares.  */
  const arm_feature_set merge_value;	/* Features enabled by "+<name>".  */
  const arm_feature_set clear_value;	/* Features disabled by "+no<name>".  */
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set allowed_archs[2];
};
27148
/* The following table must be in alphabetical order with a NULL last entry.  */

/* Entry constructors: N = extension name, M = features merged in by "+N",
   C = features cleared by "+noN"; AA (resp. AA1/AA2) names the architecture(s)
   on which the extension is permitted.  */
#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }

/* DEPRECATED: Refrain from using this table to add any new extensions, instead
   use the context sensitive approach using arm_ext_table's.  */
static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc",	 ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
			  ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
			  ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("dsp",	ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
  ARM_EXT_OPT ("fp",	ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE),
  ARM_EXT_OPT ("fp16",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("fp16fml",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						  | ARM_EXT2_FP16_FML),
			   ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						  | ARM_EXT2_FP16_FML),
			   ARM_ARCH_V8_2A),
  ARM_EXT_OPT2 ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
     Thumb divide instruction.  Due to this having the same name as the
     previous entry, this will be ignored when doing command-line parsing and
     only considered by build attribute selection code.  */
  ARM_EXT_OPT ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
			  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
			   ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
  ARM_EXT_OPT2 ("mp",	ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("os",	ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
			  ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
			  ARM_ARCH_V8A),
  ARM_EXT_OPT ("ras",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("rdma",	FPU_ARCH_NEON_VFP_ARMV8_1,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("sb",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
			ARM_ARCH_V8A),
  ARM_EXT_OPT2 ("sec",	ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("simd",	FPU_ARCH_NEON_VFP_ARMV8,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("virt",	ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
					      | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
};
#undef ARM_EXT_OPT
/* NOTE(review): ARM_EXT_OPT2 is left defined here (only ARM_EXT_OPT is
   undefined) — confirm nothing later relies on it before adding a matching
   #undef.  */
27234
/* ISA floating-point and Advanced SIMD extensions.  */
/* Maps an -mfpu= option name to the FPU/SIMD feature set it selects.  */
struct arm_option_fpu_value_table
{
  const char * name;		/* FPU name as given on the command line.  */
  const arm_feature_set value;	/* Corresponding coprocessor feature bits.  */
};
27241
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Entries are looked up by exact string match in
   arm_parse_fpu, so the table need not be sorted.  NULL-terminated.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3",	FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}
};
27292
/* Simple name -> numeric value mapping, used by the float-ABI and EABI
   version option tables below.  */
struct arm_option_value_table
{
  const char *name;	/* Option value name, matched exactly.  */
  long value;		/* Value recorded when NAME matches.  */
};
27298
/* Recognized -mfloat-abi= values; NULL-terminated (see
   arm_parse_float_abi).  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
27306
27307 #ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
/* Recognized -meabi= values; NULL-terminated (see arm_parse_eabi).  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
27316 #endif
27317
/* Describes one long (multi-character) command-line option.  OPTION is
   matched as a prefix by md_parse_option, which then hands the remaining
   text to FUNC.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.  */
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
27325
27326 static bfd_boolean
27327 arm_parse_extension (const char *str, const arm_feature_set *opt_set,
27328 arm_feature_set *ext_set,
27329 const struct arm_ext_table *ext_table)
27330 {
27331 /* We insist on extensions being specified in alphabetical order, and with
27332 extensions being added before being removed. We achieve this by having
27333 the global ARM_EXTENSIONS table in alphabetical order, and using the
27334 ADDING_VALUE variable to indicate whether we are adding an extension (1)
27335 or removing it (0) and only allowing it to change in the order
27336 -1 -> 1 -> 0. */
27337 const struct arm_option_extension_value_table * opt = NULL;
27338 const arm_feature_set arm_any = ARM_ANY;
27339 int adding_value = -1;
27340
27341 while (str != NULL && *str != 0)
27342 {
27343 const char *ext;
27344 size_t len;
27345
27346 if (*str != '+')
27347 {
27348 as_bad (_("invalid architectural extension"));
27349 return FALSE;
27350 }
27351
27352 str++;
27353 ext = strchr (str, '+');
27354
27355 if (ext != NULL)
27356 len = ext - str;
27357 else
27358 len = strlen (str);
27359
27360 if (len >= 2 && strncmp (str, "no", 2) == 0)
27361 {
27362 if (adding_value != 0)
27363 {
27364 adding_value = 0;
27365 opt = arm_extensions;
27366 }
27367
27368 len -= 2;
27369 str += 2;
27370 }
27371 else if (len > 0)
27372 {
27373 if (adding_value == -1)
27374 {
27375 adding_value = 1;
27376 opt = arm_extensions;
27377 }
27378 else if (adding_value != 1)
27379 {
27380 as_bad (_("must specify extensions to add before specifying "
27381 "those to remove"));
27382 return FALSE;
27383 }
27384 }
27385
27386 if (len == 0)
27387 {
27388 as_bad (_("missing architectural extension"));
27389 return FALSE;
27390 }
27391
27392 gas_assert (adding_value != -1);
27393 gas_assert (opt != NULL);
27394
27395 if (ext_table != NULL)
27396 {
27397 const struct arm_ext_table * ext_opt = ext_table;
27398 bfd_boolean found = FALSE;
27399 for (; ext_opt->name != NULL; ext_opt++)
27400 if (ext_opt->name_len == len
27401 && strncmp (ext_opt->name, str, len) == 0)
27402 {
27403 if (adding_value)
27404 {
27405 if (ARM_FEATURE_ZERO (ext_opt->merge))
27406 /* TODO: Option not supported. When we remove the
27407 legacy table this case should error out. */
27408 continue;
27409
27410 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, ext_opt->merge);
27411 }
27412 else
27413 {
27414 if (ARM_FEATURE_ZERO (ext_opt->clear))
27415 /* TODO: Option not supported. When we remove the
27416 legacy table this case should error out. */
27417 continue;
27418 ARM_CLEAR_FEATURE (*ext_set, *ext_set, ext_opt->clear);
27419 }
27420 found = TRUE;
27421 break;
27422 }
27423 if (found)
27424 {
27425 str = ext;
27426 continue;
27427 }
27428 }
27429
27430 /* Scan over the options table trying to find an exact match. */
27431 for (; opt->name != NULL; opt++)
27432 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
27433 {
27434 int i, nb_allowed_archs =
27435 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
27436 /* Check we can apply the extension to this architecture. */
27437 for (i = 0; i < nb_allowed_archs; i++)
27438 {
27439 /* Empty entry. */
27440 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
27441 continue;
27442 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
27443 break;
27444 }
27445 if (i == nb_allowed_archs)
27446 {
27447 as_bad (_("extension does not apply to the base architecture"));
27448 return FALSE;
27449 }
27450
27451 /* Add or remove the extension. */
27452 if (adding_value)
27453 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
27454 else
27455 ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);
27456
27457 /* Allowing Thumb division instructions for ARMv7 in autodetection
27458 rely on this break so that duplicate extensions (extensions
27459 with the same name as a previous extension in the list) are not
27460 considered for command-line parsing. */
27461 break;
27462 }
27463
27464 if (opt->name == NULL)
27465 {
27466 /* Did we fail to find an extension because it wasn't specified in
27467 alphabetical order, or because it does not exist? */
27468
27469 for (opt = arm_extensions; opt->name != NULL; opt++)
27470 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
27471 break;
27472
27473 if (opt->name == NULL)
27474 as_bad (_("unknown architectural extension `%s'"), str);
27475 else
27476 as_bad (_("architectural extensions must be specified in "
27477 "alphabetical order"));
27478
27479 return FALSE;
27480 }
27481 else
27482 {
27483 /* We should skip the extension we've just matched the next time
27484 round. */
27485 opt++;
27486 }
27487
27488 str = ext;
27489 };
27490
27491 return TRUE;
27492 }
27493
27494 static bfd_boolean
27495 arm_parse_cpu (const char *str)
27496 {
27497 const struct arm_cpu_option_table *opt;
27498 const char *ext = strchr (str, '+');
27499 size_t len;
27500
27501 if (ext != NULL)
27502 len = ext - str;
27503 else
27504 len = strlen (str);
27505
27506 if (len == 0)
27507 {
27508 as_bad (_("missing cpu name `%s'"), str);
27509 return FALSE;
27510 }
27511
27512 for (opt = arm_cpus; opt->name != NULL; opt++)
27513 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
27514 {
27515 mcpu_cpu_opt = &opt->value;
27516 if (mcpu_ext_opt == NULL)
27517 mcpu_ext_opt = XNEW (arm_feature_set);
27518 *mcpu_ext_opt = opt->ext;
27519 mcpu_fpu_opt = &opt->default_fpu;
27520 if (opt->canonical_name)
27521 {
27522 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
27523 strcpy (selected_cpu_name, opt->canonical_name);
27524 }
27525 else
27526 {
27527 size_t i;
27528
27529 if (len >= sizeof selected_cpu_name)
27530 len = (sizeof selected_cpu_name) - 1;
27531
27532 for (i = 0; i < len; i++)
27533 selected_cpu_name[i] = TOUPPER (opt->name[i]);
27534 selected_cpu_name[i] = 0;
27535 }
27536
27537 if (ext != NULL)
27538 return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt, NULL);
27539
27540 return TRUE;
27541 }
27542
27543 as_bad (_("unknown cpu `%s'"), str);
27544 return FALSE;
27545 }
27546
27547 static bfd_boolean
27548 arm_parse_arch (const char *str)
27549 {
27550 const struct arm_arch_option_table *opt;
27551 const char *ext = strchr (str, '+');
27552 size_t len;
27553
27554 if (ext != NULL)
27555 len = ext - str;
27556 else
27557 len = strlen (str);
27558
27559 if (len == 0)
27560 {
27561 as_bad (_("missing architecture name `%s'"), str);
27562 return FALSE;
27563 }
27564
27565 for (opt = arm_archs; opt->name != NULL; opt++)
27566 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
27567 {
27568 march_cpu_opt = &opt->value;
27569 if (march_ext_opt == NULL)
27570 march_ext_opt = XNEW (arm_feature_set);
27571 *march_ext_opt = arm_arch_none;
27572 march_fpu_opt = &opt->default_fpu;
27573 strcpy (selected_cpu_name, opt->name);
27574
27575 if (ext != NULL)
27576 return arm_parse_extension (ext, march_cpu_opt, march_ext_opt,
27577 opt->ext_table);
27578
27579 return TRUE;
27580 }
27581
27582 as_bad (_("unknown architecture `%s'\n"), str);
27583 return FALSE;
27584 }
27585
27586 static bfd_boolean
27587 arm_parse_fpu (const char * str)
27588 {
27589 const struct arm_option_fpu_value_table * opt;
27590
27591 for (opt = arm_fpus; opt->name != NULL; opt++)
27592 if (streq (opt->name, str))
27593 {
27594 mfpu_opt = &opt->value;
27595 return TRUE;
27596 }
27597
27598 as_bad (_("unknown floating point format `%s'\n"), str);
27599 return FALSE;
27600 }
27601
27602 static bfd_boolean
27603 arm_parse_float_abi (const char * str)
27604 {
27605 const struct arm_option_value_table * opt;
27606
27607 for (opt = arm_float_abis; opt->name != NULL; opt++)
27608 if (streq (opt->name, str))
27609 {
27610 mfloat_abi_opt = opt->value;
27611 return TRUE;
27612 }
27613
27614 as_bad (_("unknown floating point abi `%s'\n"), str);
27615 return FALSE;
27616 }
27617
27618 #ifdef OBJ_ELF
27619 static bfd_boolean
27620 arm_parse_eabi (const char * str)
27621 {
27622 const struct arm_option_value_table *opt;
27623
27624 for (opt = arm_eabis; opt->name != NULL; opt++)
27625 if (streq (opt->name, str))
27626 {
27627 meabi_flags = opt->value;
27628 return TRUE;
27629 }
27630 as_bad (_("unknown EABI `%s'\n"), str);
27631 return FALSE;
27632 }
27633 #endif
27634
27635 static bfd_boolean
27636 arm_parse_it_mode (const char * str)
27637 {
27638 bfd_boolean ret = TRUE;
27639
27640 if (streq ("arm", str))
27641 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
27642 else if (streq ("thumb", str))
27643 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
27644 else if (streq ("always", str))
27645 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
27646 else if (streq ("never", str))
27647 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
27648 else
27649 {
27650 as_bad (_("unknown implicit IT mode `%s', should be "\
27651 "arm, thumb, always, or never."), str);
27652 ret = FALSE;
27653 }
27654
27655 return ret;
27656 }
27657
27658 static bfd_boolean
27659 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
27660 {
27661 codecomposer_syntax = TRUE;
27662 arm_comment_chars[0] = ';';
27663 arm_line_separator_chars[0] = 0;
27664 return TRUE;
27665 }
27666
/* Long-form target options.  Each OPTION string is matched as a prefix by
   md_parse_option, which then calls the entry's parse function on the text
   that follows.  NULL-terminated.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}
};
27687
/* Implement md_parse_option.  C is the option character (or long-option
   code) and ARG its argument, if any.  Return non-zero if the option was
   recognized, zero to let the generic option code report an error.  */

int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

#ifdef OBJ_ELF
    case OPTION_FDPIC:
      arm_fdpic = TRUE;
      break;
#endif /* OBJ_ELF */

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the table of simple flag options, matched in full.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Next the legacy options, also matched in full.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      /* Finally the long options, matched as a prefix of ARG.  */
      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  ARG lacks the leading option
		 character, hence the "- 1" when skipping the option name.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
27784
27785 void
27786 md_show_usage (FILE * fp)
27787 {
27788 struct arm_option_table *opt;
27789 struct arm_long_option_table *lopt;
27790
27791 fprintf (fp, _(" ARM-specific assembler options:\n"));
27792
27793 for (opt = arm_opts; opt->option != NULL; opt++)
27794 if (opt->help != NULL)
27795 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
27796
27797 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
27798 if (lopt->help != NULL)
27799 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
27800
27801 #ifdef OPTION_EB
27802 fprintf (fp, _("\
27803 -EB assemble code for a big-endian cpu\n"));
27804 #endif
27805
27806 #ifdef OPTION_EL
27807 fprintf (fp, _("\
27808 -EL assemble code for a little-endian cpu\n"));
27809 #endif
27810
27811 fprintf (fp, _("\
27812 --fix-v4bx Allow BX in ARMv4 code\n"));
27813
27814 #ifdef OBJ_ELF
27815 fprintf (fp, _("\
27816 --fdpic generate an FDPIC object file\n"));
27817 #endif /* OBJ_ELF */
27818 }
27819
27820 #ifdef OBJ_ELF
27821
/* Associates an EABI Tag_CPU_arch build attribute value with the feature
   set of the corresponding architecture.  */
typedef struct
{
  int val;			/* Tag_CPU_arch value.  */
  arm_feature_set flags;	/* Features of that architecture.  */
} cpu_arch_ver_table;
27827
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   chronologically for architectures, with an exception for ARMv6-M and
   ARMv6S-M due to legacy reasons.  No new architecture should have a
   special case.  This allows for build attribute selection results to be
   stable when new architectures are added.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
  {TAG_CPU_ARCH_PRE_V4,	  ARM_ARCH_V1},
  {TAG_CPU_ARCH_PRE_V4,	  ARM_ARCH_V2},
  {TAG_CPU_ARCH_PRE_V4,	  ARM_ARCH_V2S},
  {TAG_CPU_ARCH_PRE_V4,	  ARM_ARCH_V3},
  {TAG_CPU_ARCH_PRE_V4,	  ARM_ARCH_V3M},
  {TAG_CPU_ARCH_V4,	  ARM_ARCH_V4xM},
  {TAG_CPU_ARCH_V4,	  ARM_ARCH_V4},
  {TAG_CPU_ARCH_V4T,	  ARM_ARCH_V4TxM},
  {TAG_CPU_ARCH_V4T,	  ARM_ARCH_V4T},
  {TAG_CPU_ARCH_V5T,	  ARM_ARCH_V5xM},
  {TAG_CPU_ARCH_V5T,	  ARM_ARCH_V5},
  {TAG_CPU_ARCH_V5T,	  ARM_ARCH_V5TxM},
  {TAG_CPU_ARCH_V5T,	  ARM_ARCH_V5T},
  {TAG_CPU_ARCH_V5TE,	  ARM_ARCH_V5TExP},
  {TAG_CPU_ARCH_V5TE,	  ARM_ARCH_V5TE},
  {TAG_CPU_ARCH_V5TEJ,	  ARM_ARCH_V5TEJ},
  {TAG_CPU_ARCH_V6,	  ARM_ARCH_V6},
  {TAG_CPU_ARCH_V6KZ,	  ARM_ARCH_V6Z},
  {TAG_CPU_ARCH_V6KZ,	  ARM_ARCH_V6KZ},
  {TAG_CPU_ARCH_V6K,	  ARM_ARCH_V6K},
  {TAG_CPU_ARCH_V6T2,	  ARM_ARCH_V6T2},
  {TAG_CPU_ARCH_V6T2,	  ARM_ARCH_V6KT2},
  {TAG_CPU_ARCH_V6T2,	  ARM_ARCH_V6ZT2},
  {TAG_CPU_ARCH_V6T2,	  ARM_ARCH_V6KZT2},

  /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
     always selected build attributes to match those of ARMv6-M
     (resp. ARMv6S-M).  However, due to these architectures being a strict
     subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
     would be selected when fully respecting chronology of architectures.
     It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
     move them before ARMv7 architectures.  */
  {TAG_CPU_ARCH_V6_M,	  ARM_ARCH_V6M},
  {TAG_CPU_ARCH_V6S_M,	  ARM_ARCH_V6SM},

  {TAG_CPU_ARCH_V7,	  ARM_ARCH_V7},
  {TAG_CPU_ARCH_V7,	  ARM_ARCH_V7A},
  {TAG_CPU_ARCH_V7,	  ARM_ARCH_V7R},
  {TAG_CPU_ARCH_V7,	  ARM_ARCH_V7M},
  {TAG_CPU_ARCH_V7,	  ARM_ARCH_V7VE},
  {TAG_CPU_ARCH_V7E_M,	  ARM_ARCH_V7EM},
  {TAG_CPU_ARCH_V8,	  ARM_ARCH_V8A},
  {TAG_CPU_ARCH_V8,	  ARM_ARCH_V8_1A},
  {TAG_CPU_ARCH_V8,	  ARM_ARCH_V8_2A},
  {TAG_CPU_ARCH_V8,	  ARM_ARCH_V8_3A},
  {TAG_CPU_ARCH_V8M_BASE, ARM_ARCH_V8M_BASE},
  {TAG_CPU_ARCH_V8M_MAIN, ARM_ARCH_V8M_MAIN},
  {TAG_CPU_ARCH_V8R,	  ARM_ARCH_V8R},
  {TAG_CPU_ARCH_V8,	  ARM_ARCH_V8_4A},
  {TAG_CPU_ARCH_V8,	  ARM_ARCH_V8_5A},
  {TAG_CPU_ARCH_V8_1M_MAIN, ARM_ARCH_V8_1M_MAIN},
  /* Sentinel: terminates the scan in get_aeabi_cpu_arch_from_fset.  */
  {-1,			  ARM_ARCH_NONE}
};
27888
27889 /* Set an attribute if it has not already been set by the user. */
27890
27891 static void
27892 aeabi_set_attribute_int (int tag, int value)
27893 {
27894 if (tag < 1
27895 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
27896 || !attributes_set_explicitly[tag])
27897 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
27898 }
27899
27900 static void
27901 aeabi_set_attribute_string (int tag, const char *value)
27902 {
27903 if (tag < 1
27904 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
27905 || !attributes_set_explicitly[tag])
27906 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
27907 }
27908
27909 /* Return whether features in the *NEEDED feature set are available via
27910 extensions for the architecture whose feature set is *ARCH_FSET. */
27911
27912 static bfd_boolean
27913 have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
27914 const arm_feature_set *needed)
27915 {
27916 int i, nb_allowed_archs;
27917 arm_feature_set ext_fset;
27918 const struct arm_option_extension_value_table *opt;
27919
27920 ext_fset = arm_arch_none;
27921 for (opt = arm_extensions; opt->name != NULL; opt++)
27922 {
27923 /* Extension does not provide any feature we need. */
27924 if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
27925 continue;
27926
27927 nb_allowed_archs =
27928 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
27929 for (i = 0; i < nb_allowed_archs; i++)
27930 {
27931 /* Empty entry. */
27932 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
27933 break;
27934
27935 /* Extension is available, add it. */
27936 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
27937 ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
27938 }
27939 }
27940
27941 /* Can we enable all features in *needed? */
27942 return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
27943 }
27944
/* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
   a given architecture feature set *ARCH_EXT_FSET including extension feature
   set *EXT_FSET.  Selection logic used depends on EXACT_MATCH:
   - if true, check for an exact match of the architecture modulo extensions;
   - otherwise, select build attribute value of the first superset
     architecture released so that results remains stable when new
     architectures are added.
   For -march/-mcpu=all the build attribute value of the most featureful
   architecture is returned.  Tag_CPU_arch_profile result is returned in
   PROFILE.  Returns -1 (PROFILE untouched) if no entry matches.  */

static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
			      const arm_feature_set *ext_fset,
			      char *profile, int exact_match)
{
  arm_feature_set arch_fset;
  const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;

  /* Select most featureful architecture with all its extensions if building
     for -march=all as the feature sets used to set build attributes.  */
  if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
    {
      /* Force revisiting of decision for each new architecture.  */
      gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8_1M_MAIN);
      *profile = 'A';
      return TAG_CPU_ARCH_V8;
    }

  /* ARCH_FSET is the base architecture with extension features removed.  */
  ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);

  for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
    {
      arm_feature_set known_arch_fset;

      /* Compare CPU feature bits only; ignore FPU bits in the table.  */
      ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
      if (exact_match)
	{
	  /* Base architecture match user-specified architecture and
	     extensions, eg. ARMv6S-M matching -march=armv6-m+os.  */
	  if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
	    {
	      p_ver_ret = p_ver;
	      goto found;
	    }
	  /* Base architecture match user-specified architecture only
	     (eg. ARMv6-M in the same case as above).  Record it in case we
	     find a match with above condition.  */
	  else if (p_ver_ret == NULL
		   && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
	    p_ver_ret = p_ver;
	}
      else
	{

	  /* Architecture has all features wanted.  */
	  if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
	    {
	      arm_feature_set added_fset;

	      /* Compute features added by this architecture over the one
		 recorded in p_ver_ret.  */
	      if (p_ver_ret != NULL)
		ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
				   p_ver_ret->flags);
	      /* First architecture that match incl. with extensions, or the
		 only difference in features over the recorded match is
		 features that were optional and are now mandatory.  */
	      if (p_ver_ret == NULL
		  || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
		{
		  p_ver_ret = p_ver;
		  goto found;
		}
	    }
	  else if (p_ver_ret == NULL)
	    {
	      arm_feature_set needed_ext_fset;

	      ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);

	      /* Architecture has all features needed when using some
		 extensions.  Record it and continue searching in case there
		 exist an architecture providing all needed features without
		 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
		 OS extension).  */
	      if (have_ext_for_needed_feat_p (&known_arch_fset,
					      &needed_ext_fset))
		p_ver_ret = p_ver;
	    }
	}
    }

  if (p_ver_ret == NULL)
    return -1;

found:
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only)))
    *profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r))
    *profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
    *profile = 'M';
  else
    *profile = '\0';
  return p_ver_ret->val;
}
28056
28057 /* Set the public EABI object attributes. */
28058
static void
aeabi_set_public_attributes (void)
{
  char profile = '\0';
  int arch = -1;
  int virt_sec = 0;
  int fp16_optional = 0;
  int skip_exact_match = 0;
  arm_feature_set flags, flags_arch, flags_ext;

  /* Autodetection mode, choose the architecture based the instructions
     actually used.  */
  if (no_cpu_selected ())
    {
      ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);

      /* If any ARM-state instruction was seen, the baseline ARM ISA (v1)
	 must be part of the feature set.  */
      if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

      /* Likewise the baseline Thumb ISA (v4T) for Thumb-state code.  */
      if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

      /* Code run during relaxation relies on selected_cpu being set.  */
      /* Strip FPU bits: architecture matching below is done on core
	 features only.  */
      ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
      flags_ext = arm_arch_none;
      ARM_CLEAR_FEATURE (selected_arch, flags_arch, flags_ext);
      selected_ext = flags_ext;
      selected_cpu = flags;
    }
  /* Otherwise, choose the architecture based on the capabilities of the
     requested cpu.  */
  else
    {
      ARM_MERGE_FEATURE_SETS (flags_arch, selected_arch, selected_ext);
      ARM_CLEAR_FEATURE (flags_arch, flags_arch, fpu_any);
      flags_ext = selected_ext;
      flags = selected_cpu;
    }
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_fpu);

  /* Allow the user to override the reported architecture.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch))
    {
      ARM_CLEAR_FEATURE (flags_arch, selected_object_arch, fpu_any);
      flags_ext = arm_arch_none;
    }
  else
    skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);

  /* When this function is run again after relaxation has happened there is no
     way to determine whether an architecture or CPU was specified by the user:
     - selected_cpu is set above for relaxation to work;
     - march_cpu_opt is not set if only -mcpu or .cpu is used;
     - mcpu_cpu_opt is set to arm_arch_any for autodetection.
     Therefore, if not in -march=all case we first try an exact match and fall
     back to autodetection.  */
  if (!skip_exact_match)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
  if (arch == -1)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
  if (arch == -1)
    as_bad (_("no architecture contains all the instructions used\n"));

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* Architecture-named "CPUs" are reported as e.g. "7-A" rather than
	 "armv7-a": drop the "armv" prefix and upper-case the rest.  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (ARM_CPU_HAS_FEATURE (selected_ext, arm_ext_dsp))
    aeabi_set_attribute_int (Tag_DSP_extension, 1);

  /* Recompute the core-only feature set now that FPU features have been
     merged into FLAGS above.  */
  ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || ARM_FEATURE_ZERO (flags_arch))
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || ARM_FEATURE_ZERO (flags_arch))
    {
      int thumb_isa_use;

      /* v8-M-only targets report value 3; Thumb-2 capable targets report 2;
	 everything else reports the original 16-bit-only Thumb ISA (1).  */
      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  Checks are ordered from newest to oldest FPU extension;
     the first match decides the reported value.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  Single-precision-only VFP (v1xd without full v1).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  Only emitted when
     half-precision is an optional extension for the FPU selected above.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  Bit 0: Security Extensions (SMC); bit 1:
     Virtualization Extensions (HVC/ERET).  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
28262
28263 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
28264 finished and free extension feature bits which will not be used anymore. */
28265
28266 void
28267 arm_md_post_relax (void)
28268 {
28269 aeabi_set_public_attributes ();
28270 XDELETE (mcpu_ext_opt);
28271 mcpu_ext_opt = NULL;
28272 XDELETE (march_ext_opt);
28273 march_ext_opt = NULL;
28274 }
28275
28276 /* Add the default contents for the .ARM.attributes section. */
28277
28278 void
28279 arm_md_end (void)
28280 {
28281 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
28282 return;
28283
28284 aeabi_set_public_attributes ();
28285 }
28286 #endif /* OBJ_ELF */
28287
28288 /* Parse a .cpu directive. */
28289
28290 static void
28291 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
28292 {
28293 const struct arm_cpu_option_table *opt;
28294 char *name;
28295 char saved_char;
28296
28297 name = input_line_pointer;
28298 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
28299 input_line_pointer++;
28300 saved_char = *input_line_pointer;
28301 *input_line_pointer = 0;
28302
28303 /* Skip the first "all" entry. */
28304 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
28305 if (streq (opt->name, name))
28306 {
28307 selected_arch = opt->value;
28308 selected_ext = opt->ext;
28309 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
28310 if (opt->canonical_name)
28311 strcpy (selected_cpu_name, opt->canonical_name);
28312 else
28313 {
28314 int i;
28315 for (i = 0; opt->name[i]; i++)
28316 selected_cpu_name[i] = TOUPPER (opt->name[i]);
28317
28318 selected_cpu_name[i] = 0;
28319 }
28320 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
28321
28322 *input_line_pointer = saved_char;
28323 demand_empty_rest_of_line ();
28324 return;
28325 }
28326 as_bad (_("unknown cpu `%s'"), name);
28327 *input_line_pointer = saved_char;
28328 ignore_rest_of_line ();
28329 }
28330
28331 /* Parse a .arch directive. */
28332
28333 static void
28334 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
28335 {
28336 const struct arm_arch_option_table *opt;
28337 char saved_char;
28338 char *name;
28339
28340 name = input_line_pointer;
28341 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
28342 input_line_pointer++;
28343 saved_char = *input_line_pointer;
28344 *input_line_pointer = 0;
28345
28346 /* Skip the first "all" entry. */
28347 for (opt = arm_archs + 1; opt->name != NULL; opt++)
28348 if (streq (opt->name, name))
28349 {
28350 selected_arch = opt->value;
28351 selected_ext = arm_arch_none;
28352 selected_cpu = selected_arch;
28353 strcpy (selected_cpu_name, opt->name);
28354 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
28355 *input_line_pointer = saved_char;
28356 demand_empty_rest_of_line ();
28357 return;
28358 }
28359
28360 as_bad (_("unknown architecture `%s'\n"), name);
28361 *input_line_pointer = saved_char;
28362 ignore_rest_of_line ();
28363 }
28364
28365 /* Parse a .object_arch directive. */
28366
28367 static void
28368 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
28369 {
28370 const struct arm_arch_option_table *opt;
28371 char saved_char;
28372 char *name;
28373
28374 name = input_line_pointer;
28375 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
28376 input_line_pointer++;
28377 saved_char = *input_line_pointer;
28378 *input_line_pointer = 0;
28379
28380 /* Skip the first "all" entry. */
28381 for (opt = arm_archs + 1; opt->name != NULL; opt++)
28382 if (streq (opt->name, name))
28383 {
28384 selected_object_arch = opt->value;
28385 *input_line_pointer = saved_char;
28386 demand_empty_rest_of_line ();
28387 return;
28388 }
28389
28390 as_bad (_("unknown architecture `%s'\n"), name);
28391 *input_line_pointer = saved_char;
28392 ignore_rest_of_line ();
28393 }
28394
28395 /* Parse a .arch_extension directive. */
28396
28397 static void
28398 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
28399 {
28400 const struct arm_option_extension_value_table *opt;
28401 char saved_char;
28402 char *name;
28403 int adding_value = 1;
28404
28405 name = input_line_pointer;
28406 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
28407 input_line_pointer++;
28408 saved_char = *input_line_pointer;
28409 *input_line_pointer = 0;
28410
28411 if (strlen (name) >= 2
28412 && strncmp (name, "no", 2) == 0)
28413 {
28414 adding_value = 0;
28415 name += 2;
28416 }
28417
28418 for (opt = arm_extensions; opt->name != NULL; opt++)
28419 if (streq (opt->name, name))
28420 {
28421 int i, nb_allowed_archs =
28422 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
28423 for (i = 0; i < nb_allowed_archs; i++)
28424 {
28425 /* Empty entry. */
28426 if (ARM_CPU_IS_ANY (opt->allowed_archs[i]))
28427 continue;
28428 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], selected_arch))
28429 break;
28430 }
28431
28432 if (i == nb_allowed_archs)
28433 {
28434 as_bad (_("architectural extension `%s' is not allowed for the "
28435 "current base architecture"), name);
28436 break;
28437 }
28438
28439 if (adding_value)
28440 ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
28441 opt->merge_value);
28442 else
28443 ARM_CLEAR_FEATURE (selected_ext, selected_ext, opt->clear_value);
28444
28445 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
28446 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
28447 *input_line_pointer = saved_char;
28448 demand_empty_rest_of_line ();
28449 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
28450 on this return so that duplicate extensions (extensions with the
28451 same name as a previous extension in the list) are not considered
28452 for command-line parsing. */
28453 return;
28454 }
28455
28456 if (opt->name == NULL)
28457 as_bad (_("unknown architecture extension `%s'\n"), name);
28458
28459 *input_line_pointer = saved_char;
28460 ignore_rest_of_line ();
28461 }
28462
28463 /* Parse a .fpu directive. */
28464
28465 static void
28466 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
28467 {
28468 const struct arm_option_fpu_value_table *opt;
28469 char saved_char;
28470 char *name;
28471
28472 name = input_line_pointer;
28473 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
28474 input_line_pointer++;
28475 saved_char = *input_line_pointer;
28476 *input_line_pointer = 0;
28477
28478 for (opt = arm_fpus; opt->name != NULL; opt++)
28479 if (streq (opt->name, name))
28480 {
28481 selected_fpu = opt->value;
28482 #ifndef CPU_DEFAULT
28483 if (no_cpu_selected ())
28484 ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
28485 else
28486 #endif
28487 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
28488 *input_line_pointer = saved_char;
28489 demand_empty_rest_of_line ();
28490 return;
28491 }
28492
28493 as_bad (_("unknown floating point format `%s'\n"), name);
28494 *input_line_pointer = saved_char;
28495 ignore_rest_of_line ();
28496 }
28497
/* Copy symbol information: propagate the ARM-specific per-symbol flag word
   (accessed through ARM_GET_FLAG) from SRC to DEST.  Called by the generic
   gas symbol code when one symbol's attributes are cloned onto another.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
28505
28506 #ifdef OBJ_ELF
28507 /* Given a symbolic attribute NAME, return the proper integer value.
28508 Returns -1 if the attribute is not known. */
28509
int
arm_convert_symbolic_attribute (const char *name)
{
  /* Mapping from symbolic attribute names (as written in assembly source)
     to their EABI tag numbers.  The tag constants come from elf/arm.h.  */
  static const struct
  {
    const char * name;
    const int tag;
  }
  attribute_table[] =
    {
      /* When you modify this table you should
	 also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
      T (Tag_CPU_raw_name),
      T (Tag_CPU_name),
      T (Tag_CPU_arch),
      T (Tag_CPU_arch_profile),
      T (Tag_ARM_ISA_use),
      T (Tag_THUMB_ISA_use),
      T (Tag_FP_arch),
      T (Tag_VFP_arch),
      T (Tag_WMMX_arch),
      T (Tag_Advanced_SIMD_arch),
      T (Tag_PCS_config),
      T (Tag_ABI_PCS_R9_use),
      T (Tag_ABI_PCS_RW_data),
      T (Tag_ABI_PCS_RO_data),
      T (Tag_ABI_PCS_GOT_use),
      T (Tag_ABI_PCS_wchar_t),
      T (Tag_ABI_FP_rounding),
      T (Tag_ABI_FP_denormal),
      T (Tag_ABI_FP_exceptions),
      T (Tag_ABI_FP_user_exceptions),
      T (Tag_ABI_FP_number_model),
      T (Tag_ABI_align_needed),
      T (Tag_ABI_align8_needed),
      T (Tag_ABI_align_preserved),
      T (Tag_ABI_align8_preserved),
      T (Tag_ABI_enum_size),
      T (Tag_ABI_HardFP_use),
      T (Tag_ABI_VFP_args),
      T (Tag_ABI_WMMX_args),
      T (Tag_ABI_optimization_goals),
      T (Tag_ABI_FP_optimization_goals),
      T (Tag_compatibility),
      T (Tag_CPU_unaligned_access),
      T (Tag_FP_HP_extension),
      T (Tag_VFP_HP_extension),
      T (Tag_ABI_FP_16bit_format),
      T (Tag_MPextension_use),
      T (Tag_DIV_use),
      T (Tag_nodefaults),
      T (Tag_also_compatible_with),
      T (Tag_conformance),
      T (Tag_T2EE_use),
      T (Tag_Virtualization_use),
      T (Tag_DSP_extension),
      /* We deliberately do not include Tag_MPextension_use_legacy.  */
#undef T
    };
  unsigned int i;

  if (name == NULL)
    return -1;

  /* Linear scan is adequate: the table is small and static.  */
  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
28581
28582 /* Apply sym value for relocations only in the case that they are for
28583 local symbols in the same segment as the fixup and you have the
28584 respective architectural feature for blx and simple switches. */
28585
28586 int
28587 arm_apply_sym_value (struct fix * fixP, segT this_seg)
28588 {
28589 if (fixP->fx_addsy
28590 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28591 /* PR 17444: If the local symbol is in a different section then a reloc
28592 will always be generated for it, so applying the symbol value now
28593 will result in a double offset being stored in the relocation. */
28594 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
28595 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
28596 {
28597 switch (fixP->fx_r_type)
28598 {
28599 case BFD_RELOC_ARM_PCREL_BLX:
28600 case BFD_RELOC_THUMB_PCREL_BRANCH23:
28601 if (ARM_IS_FUNC (fixP->fx_addsy))
28602 return 1;
28603 break;
28604
28605 case BFD_RELOC_ARM_PCREL_CALL:
28606 case BFD_RELOC_THUMB_PCREL_BLX:
28607 if (THUMB_IS_FUNC (fixP->fx_addsy))
28608 return 1;
28609 break;
28610
28611 default:
28612 break;
28613 }
28614
28615 }
28616 return 0;
28617 }
28618 #endif /* OBJ_ELF */
This page took 1.080243 seconds and 5 git commands to generate.